#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}

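/*
 * Worked example for calc_layout() (illustrative values, assuming the
 * default simple striping with one 4 MB stripe unit per 4 MB object):
 * a request for 4 MB at file offset 6 MB maps to objnum 1, objoff
 * 2 MB, objlen 2 MB, and *plen is trimmed from 4 MB to 2 MB so the
 * request stops at the object boundary.  The caller must issue a
 * separate request for the remainder, which lands in object 2.
 */
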
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}
EXPORT_SYMBOL(ceph_osd_data_pages_init);

void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}
EXPORT_SYMBOL(ceph_osd_data_pagelist_init);

#ifdef CONFIG_BLOCK
void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
EXPORT_SYMBOL(ceph_osd_data_bio_init);
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type != CEPH_OSD_DATA_TYPE_PAGES)
		return;

	if (osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
}

/*
 * requests
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req;

	req = container_of(kref, struct ceph_osd_request, r_kref);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		ceph_msg_revoke_incoming(req->r_reply);
		ceph_msg_put(req->r_reply);
	}

	ceph_osd_data_release(&req->r_data_in);
	ceph_osd_data_release(&req->r_data_out);

	ceph_put_snap_context(req->r_snapc);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);

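/*
 * Allocate a new request along with its request and reply messages.
 * Note that when use_mempool is set both messages come from the osd
 * client's mempools, so a request can still be built without further
 * allocation, e.g. during writeback under memory pressure.
 */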
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	size_t msg_size;

	msg_size = 4 + 4 + 8 + 8 + 4+8;
	msg_size += 2 + 4 + 8 + 4 + 4;	/* oloc */
	msg_size += 1 + 8 + 4 + 4;	/* pg_t */
	msg_size += 4 + MAX_OBJ_NAME_SIZE;
	msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
	msg_size += 8;	/* snapid */
	msg_size += 8;	/* snap_seq */
	msg_size += 8 * (snapc ? snapc->num_snaps : 0);	/* snaps */
	msg_size += 4;

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	ceph_osd_data_init(&req->r_data_in);
	ceph_osd_data_init(&req->r_data_out);

	/* create request message; allow space for oid */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_STAT:
	case CEPH_OSD_OP_MAPEXT:
	case CEPH_OSD_OP_MASKTRUNC:
	case CEPH_OSD_OP_SPARSE_READ:
	case CEPH_OSD_OP_NOTIFY:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_ASSERT_VER:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_APPEND:
	case CEPH_OSD_OP_STARTSYNC:
	case CEPH_OSD_OP_SETTRUNC:
	case CEPH_OSD_OP_TRIMTRUNC:
	case CEPH_OSD_OP_TMAPUP:
	case CEPH_OSD_OP_TMAPPUT:
	case CEPH_OSD_OP_TMAPGET:
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_ROLLBACK:
	case CEPH_OSD_OP_WATCH:
	case CEPH_OSD_OP_OMAPGETKEYS:
	case CEPH_OSD_OP_OMAPGETVALS:
	case CEPH_OSD_OP_OMAPGETHEADER:
	case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
	case CEPH_OSD_OP_OMAPSETVALS:
	case CEPH_OSD_OP_OMAPSETHEADER:
	case CEPH_OSD_OP_OMAPCLEAR:
	case CEPH_OSD_OP_OMAPRMKEYS:
	case CEPH_OSD_OP_OMAP_CMP:
	case CEPH_OSD_OP_CLONERANGE:
	case CEPH_OSD_OP_ASSERT_SRC_VERSION:
	case CEPH_OSD_OP_SRC_CMPXATTR:
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_GETXATTRS:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_SETXATTRS:
	case CEPH_OSD_OP_RESETXATTRS:
	case CEPH_OSD_OP_RMXATTR:
	case CEPH_OSD_OP_PULL:
	case CEPH_OSD_OP_PUSH:
	case CEPH_OSD_OP_BALANCEREADS:
	case CEPH_OSD_OP_UNBALANCEREADS:
	case CEPH_OSD_OP_SCRUB:
	case CEPH_OSD_OP_SCRUB_RESERVE:
	case CEPH_OSD_OP_SCRUB_UNRESERVE:
	case CEPH_OSD_OP_SCRUB_STOP:
	case CEPH_OSD_OP_SCRUB_MAP:
	case CEPH_OSD_OP_WRLOCK:
	case CEPH_OSD_OP_WRUNLOCK:
	case CEPH_OSD_OP_RDLOCK:
	case CEPH_OSD_OP_RDUNLOCK:
	case CEPH_OSD_OP_UPLOCK:
	case CEPH_OSD_OP_DNLOCK:
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_PGLS:
	case CEPH_OSD_OP_PGLS_FILTER:
		return true;
	default:
		return false;
	}
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode)
{
	BUG_ON(!osd_req_opcode_valid(opcode));

	memset(op, 0, sizeof (*op));

	op->op = opcode;
}

void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode,
				u64 offset, u64 length,
				u64 truncate_size, u32 truncate_seq)
{
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	osd_req_op_init(op, opcode);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE)
		payload_len += length;

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

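/*
 * Illustrative use of osd_req_op_extent_init() (hypothetical values,
 * not taken from this file): to describe a 4096-byte read at the
 * start of an object with no truncation in effect,
 *
 *	osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, 0, 4096, 0, 0);
 *
 * Callers normally derive offset/length from the file layout via
 * calc_layout() rather than hard-coding them.
 */
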
void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length)
{
	u64 previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	op->payload_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode,
			const char *class, const char *method,
			const void *request_data, size_t request_data_size)
{
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	osd_req_op_init(op, opcode);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	payload_len += size;

	op->cls.indata = request_data;
	BUG_ON(request_data_size > (size_t) U32_MAX);
	op->cls.indata_len = (u32) request_data_size;
	payload_len += request_data_size;

	op->cls.argc = 0;	/* currently unused */

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

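/*
 * A CEPH_OSD_OP_CALL executes a method of a class plugin registered
 * on the OSD.  Hypothetical example (argument values illustrative
 * only): a caller such as rbd might ask the "rbd" class for an image
 * header field with
 *
 *	osd_req_op_cls_init(&op, CEPH_OSD_OP_CALL, "rbd", "get_size",
 *			    inbuf, inbuf_len);
 *
 * The class name, method name and input data all travel as request
 * payload; see osd_req_encode_op() below.
 */
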
void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode,
				u64 cookie, u64 version, int flag)
{
	BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

	osd_req_op_init(op, opcode);

	op->watch.cookie = cookie;
	/* op->watch.ver = version; */	/* XXX 3847 */
	op->watch.ver = cpu_to_le64(version);
	if (opcode == CEPH_OSD_OP_WATCH && flag)
		op->watch.flag = (u8) 1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);

static u64 osd_req_encode_op(struct ceph_osd_request *req,
			     struct ceph_osd_op *dst,
			     struct ceph_osd_req_op *src)
{
	u64 out_data_len = 0;
	struct ceph_pagelist *pagelist;

	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		if (src->op == CEPH_OSD_OP_WRITE)
			out_data_len = src->extent.length;
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		BUG_ON(!pagelist);
		ceph_pagelist_init(pagelist);

		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		ceph_pagelist_append(pagelist, src->cls.class_name,
				     src->cls.class_len);
		ceph_pagelist_append(pagelist, src->cls.method_name,
				     src->cls.method_len);
		ceph_pagelist_append(pagelist, src->cls.indata,
				     src->cls.indata_len);

		ceph_osd_data_pagelist_init(&req->r_data_out, pagelist);
		out_data_len = pagelist->length;
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}
	dst->op = cpu_to_le16(src->op);
	dst->payload_len = cpu_to_le32(src->payload_len);

	return out_data_len;
}

/*
 * build new request AND message
 *
 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
			     u64 off, unsigned int num_ops,
			     struct ceph_osd_req_op *src_ops,
			     struct ceph_snap_context *snapc, u64 snap_id,
			     struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	struct ceph_osd_req_op *src_op;
	void *p;
	size_t msg_size;
	int flags = req->r_flags;
	u64 data_len;
	int i;

	req->r_num_ops = num_ops;
	req->r_snapid = snap_id;
	req->r_snapc = ceph_get_snap_context(snapc);

	/* encode request */
	msg->hdr.version = cpu_to_le16(4);

	p = msg->front.iov_base;
	ceph_encode_32(&p, 1);   /* client_inc is always 1 */
	req->r_request_osdmap_epoch = p;
	p += 4;
	req->r_request_flags = p;
	p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	p += sizeof(struct ceph_timespec);
	req->r_request_reassert_version = p;
	p += sizeof(struct ceph_eversion); /* will get filled in */

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	req->r_request_pool = p;
	p += 8;
	ceph_encode_32(&p, -1);  /* preferred */
	ceph_encode_32(&p, 0);   /* key len */

	ceph_encode_8(&p, 1);
	req->r_request_pgid = p;
	p += 8 + 4;
	ceph_encode_32(&p, -1);  /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* ops--can imply data */
	ceph_encode_16(&p, num_ops);
	src_op = src_ops;
	req->r_request_ops = p;
	data_len = 0;
	for (i = 0; i < num_ops; i++, src_op++) {
		data_len += osd_req_encode_op(req, p, src_op);
		p += sizeof(struct ceph_osd_op);
	}

	/* snaps */
	ceph_encode_64(&p, req->r_snapid);
	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
	if (req->r_snapc) {
		for (i = 0; i < snapc->num_snaps; i++) {
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
		}
	}

	req->r_request_attempts = p;
	p += 4;

	/* data */
	if (flags & CEPH_OSD_FLAG_WRITE) {
		u16 data_off;

		/*
		 * The header "data_off" is a hint to the receiver
		 * allowing it to align received data into its
		 * buffers such that there's no need to re-copy
		 * it before writing it to disk (direct I/O).
		 */
		data_off = (u16) (off & 0xffff);
		req->r_request->hdr.data_off = cpu_to_le16(data_off);
	}
	req->r_request->hdr.data_len = cpu_to_le32(data_len);

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);

	dout("build_request msg_size was %d\n", (int)msg_size);
}
EXPORT_SYMBOL(ceph_osdc_build_request);

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @num_ops > 1, include a 'startsync' command so that the osd will
 * flush data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen, int num_ops,
					       struct ceph_osd_req_op *ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	u32 object_size;
	u64 object_base;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	object_size = le32_to_cpu(layout->fl_object_size);
	object_base = off - objoff;
	if (truncate_size <= object_base) {
		truncate_size = 0;
	} else {
		truncate_size -= object_base;
		if (truncate_size > object_size)
			truncate_size = object_size;
	}

	osd_req_op_extent_init(&ops[0], opcode, objoff, objlen,
				truncate_size, truncate_seq);
	/*
	 * A second op in the ops array means the caller wants to
	 * also issue a 'startsync' command so that the osd will flush
	 * data quickly.
	 */
	if (num_ops > 1)
		osd_req_op_init(&ops[1], CEPH_OSD_OP_STARTSYNC);

	req->r_file_layout = *layout;  /* keep a copy */

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx",
		 vino.ino, objnum);
	req->r_oid_len = strlen(req->r_oid);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);
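
/*
 * Sketch of a typical caller (illustrative only): a read of *plen
 * bytes at file offset off might be set up as
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &plen,
 *				    1, ops, CEPH_OSD_OP_READ,
 *				    CEPH_OSD_FLAG_READ, NULL, 0, 0, false);
 *
 * followed by ceph_osdc_build_request() and submission of the request
 * to the cluster.
 */
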
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	LIST_HEAD(resend);
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;
	/*
	 * Build up a list of requests to resend by traversing the
	 * osd's list of requests.  Requests for a given object are
	 * sent in tid order, and that is also the order they're
	 * kept on this list.  Therefore all requests that are in
	 * flight will be found first, followed by all requests that
	 * have not yet been sent.  And to resend requests while
	 * preserving this order we will want to put any sent
	 * requests back on the front of the osd client's unsent
	 * list.
	 *
	 * So we build a separate ordered list of already-sent
	 * requests for the affected osd and splice it onto the
	 * front of the osd client's unsent list.  Once we've seen a
	 * request that has not yet been sent we're done.  Those
	 * requests are already sitting right where they belong.
	 */
	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		if (!req->r_sent)
			break;
		list_move_tail(&req->r_req_lru_item, &resend);
		dout("requeueing %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}
	list_splice(&resend, &osdc->req_unsent);

	/*
	 * Linger requests are re-registered before sending, which
	 * sets up a new tid for each.  We add them to the unsent
	 * list at the end to keep things in tid order.
	 */
	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add_tail(&req->r_req_lru_item, &osdc->req_unsent);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

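/*
 * Together, __move_osd_to_lru(), __remove_osd_from_lru() and
 * remove_old_osds() implement an idle-session cache: an osd with no
 * in-flight or lingering requests is parked on osd_lru with a ttl,
 * and its connection is only torn down by remove_old_osds() once
 * osd_idle_ttl has expired, rather than being closed immediately.
 */
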
/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
			req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}

static void __register_linger_request(struct ceph_osd_client *osdc,
				      struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
				ceph_file_layout_pg_pool(req->r_file_layout));
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}

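/*
 * Note that r_sent records the connection incarnation a request last
 * went out on; __map_request() compares it against o_incarnation to
 * decide whether the request must be resent after a connection reset,
 * and __cancel_request() clears it when the message is revoked.
 */
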
/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests that have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

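/*
 * An OSD may ack a write up to twice: once when the write has been
 * applied and again when it has been committed to disk
 * (CEPH_OSD_FLAG_ONDISK).  Only the ondisk ack runs
 * complete_request(), which signals r_safe_completion for fsync
 * waiters; handle_reply() below treats a second non-ondisk reply as
 * a duplicate ack.
 */
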
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	void *p, *end;
	struct ceph_osd_request *req;
	u64 tid;
	int object_len;
	int numops, payload_len, flags;
	s32 result;
	s32 retry_attempt;
	struct ceph_pg pg;
	int err;
	u32 reassert_epoch;
	u64 reassert_version;
	u32 osdmap_epoch;
	int already_completed;
	u32 bytes;
	int i;

	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_reply %p tid %llu\n", msg, tid);

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 4, bad);
	object_len = ceph_decode_32(&p);
	ceph_decode_need(&p, end, object_len, bad);
	p += object_len;

	err = ceph_decode_pgid(&p, end, &pg);
	if (err)
		goto bad;

	ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
	flags = ceph_decode_64(&p);
	result = ceph_decode_32(&p);
	reassert_epoch = ceph_decode_32(&p);
	reassert_version = ceph_decode_64(&p);
	osdmap_epoch = ceph_decode_32(&p);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		goto bad_mutex;
	}
	ceph_osdc_get_request(req);

	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
	     req, result);

	ceph_decode_need(&p, end, 4, bad);
	numops = ceph_decode_32(&p);
	if (numops > CEPH_OSD_MAX_OP)
		goto bad_put;
	if (numops != req->r_num_ops)
		goto bad_put;
	payload_len = 0;
	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
	for (i = 0; i < numops; i++) {
		struct ceph_osd_op *op = p;
		int len;

		len = le32_to_cpu(op->payload_len);
		req->r_reply_op_len[i] = len;
		dout(" op %d has %d bytes\n", i, len);
		payload_len += len;
		p += sizeof(*op);
	}
	bytes = le32_to_cpu(msg->hdr.data_len);
	if (payload_len != bytes) {
		pr_warning("sum of op payload lens %d != data_len %d",
			   payload_len, bytes);
		goto bad_put;
	}

	ceph_decode_need(&p, end, 4 + numops * 4, bad);
	retry_attempt = ceph_decode_32(&p);
	for (i = 0; i < numops; i++)
		req->r_reply_op_result[i] = ceph_decode_32(&p);

	if (!req->r_got_reply) {

		req->r_result = result;
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
		req->r_reassert_version.version = cpu_to_le64(reassert_version);

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	already_completed = req->r_completed;
	req->r_completed = 1;
	mutex_unlock(&osdc->request_mutex);
	if (already_completed)
		goto done;

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad_put:
	ceph_osdc_put_request(req);
bad_mutex:
	mutex_unlock(&osdc->request_mutex);
bad:
	pr_err("corrupt osd_op_reply got %d %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
	ceph_msg_dump(msg);
}

static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
38d6453c | 1484 | static void kick_requests(struct ceph_osd_client *osdc, int force_resend) |
422d2cb8 | 1485 | { |
a40c4f10 | 1486 | struct ceph_osd_request *req, *nreq; |
6f6c7006 SW |
1487 | struct rb_node *p; |
1488 | int needmap = 0; | |
1489 | int err; | |
422d2cb8 | 1490 | |
38d6453c | 1491 | dout("kick_requests%s\n", force_resend ? " (force resend)" : ""); | |
422d2cb8 | 1492 | mutex_lock(&osdc->request_mutex); |
6194ea89 | 1493 | for (p = rb_first(&osdc->requests); p; ) { |
6f6c7006 | 1494 | req = rb_entry(p, struct ceph_osd_request, r_node); |
6194ea89 | 1495 | p = rb_next(p); |
ab60b16d AE |
1496 | |
1497 | /* | |
1498 | * For linger requests that have not yet been | |
1499 | * registered, move them to the linger list; they'll | |
1500 | * be sent to the osd in the loop below. Unregister | |
1501 | * the request before re-registering it as a linger | |
1502 | * request to ensure the __map_request() below | |
1503 | * will decide it needs to be sent. | |
1504 | */ | |
1505 | if (req->r_linger && list_empty(&req->r_linger_item)) { | |
1506 | dout("%p tid %llu restart on osd%d\n", | |
1507 | req, req->r_tid, | |
1508 | req->r_osd ? req->r_osd->o_osd : -1); | |
1509 | __unregister_request(osdc, req); | |
1510 | __register_linger_request(osdc, req); | |
1511 | continue; | |
1512 | } | |
1513 | ||
38d6453c | 1514 | err = __map_request(osdc, req, force_resend); |
6f6c7006 SW |
1515 | if (err < 0) |
1516 | continue; /* error */ | |
1517 | if (req->r_osd == NULL) { | |
1518 | dout("%p tid %llu maps to no osd\n", req, req->r_tid); | |
1519 | needmap++; /* request a newer map */ | |
1520 | } else if (err > 0) { | |
6194ea89 SW |
1521 | if (!req->r_linger) { |
1522 | dout("%p tid %llu requeued on osd%d\n", req, | |
1523 | req->r_tid, | |
1524 | req->r_osd ? req->r_osd->o_osd : -1); | |
a40c4f10 | 1525 | req->r_flags |= CEPH_OSD_FLAG_RETRY; |
6194ea89 SW |
1526 | } |
1527 | } | |
a40c4f10 YS |
1528 | } |
1529 | ||
1530 | list_for_each_entry_safe(req, nreq, &osdc->req_linger, | |
1531 | r_linger_item) { | |
1532 | dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); | |
1533 | ||
38d6453c | 1534 | err = __map_request(osdc, req, force_resend); |
ab60b16d | 1535 | dout("__map_request returned %d\n", err); |
a40c4f10 YS |
1536 | if (err == 0) |
1537 | continue; /* no change and no osd was specified */ | |
1538 | if (err < 0) | |
1539 | continue; /* mapping error; leave it for the next kick */ | |
1540 | if (req->r_osd == NULL) { | |
1541 | dout("tid %llu maps to no valid osd\n", req->r_tid); | |
1542 | needmap++; /* request a newer map */ | |
1543 | continue; | |
6f6c7006 | 1544 | } |
a40c4f10 YS |
1545 | |
1546 | dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, | |
1547 | req->r_osd ? req->r_osd->o_osd : -1); | |
a40c4f10 | 1548 | __register_request(osdc, req); |
c89ce05e | 1549 | __unregister_linger_request(osdc, req); |
6f6c7006 | 1550 | } |
f24e9980 SW |
1551 | mutex_unlock(&osdc->request_mutex); |
1552 | ||
1553 | if (needmap) { | |
1554 | dout("%d requests for down osds, need new map\n", needmap); | |
1555 | ceph_monc_request_next_osdmap(&osdc->client->monc); | |
1556 | } | |
e6d50f67 | 1557 | reset_changed_osds(osdc); |
422d2cb8 | 1558 | } |
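/*
 * __map_request() return convention relied on in both loops above: a
 * negative value is an error (the request is skipped for now), 0 means
 * the mapping is unchanged, and a positive value means the request now
 * maps somewhere new.  In the positive case req->r_osd may still be
 * NULL (no up OSD in the PG), which is what drives the needmap count
 * and the request for a newer osdmap.
 */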
6f6c7006 SW |
1559 | |
1560 | ||
f24e9980 SW |
1561 | /* |
1562 | * Process updated osd map. | |
1563 | * | |
1564 | * The message contains any number of incremental and full maps, normally | |
1565 | * indicating some sort of topology change in the cluster. Kick requests | |
1566 | * off to different OSDs as needed. | |
1567 | */ | |
1568 | void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) | |
1569 | { | |
1570 | void *p, *end, *next; | |
1571 | u32 nr_maps, maplen; | |
1572 | u32 epoch; | |
1573 | struct ceph_osdmap *newmap = NULL, *oldmap; | |
1574 | int err; | |
1575 | struct ceph_fsid fsid; | |
1576 | ||
1577 | dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0); | |
1578 | p = msg->front.iov_base; | |
1579 | end = p + msg->front.iov_len; | |
1580 | ||
1581 | /* verify fsid */ | |
1582 | ceph_decode_need(&p, end, sizeof(fsid), bad); | |
1583 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
0743304d SW |
1584 | if (ceph_check_fsid(osdc->client, &fsid) < 0) |
1585 | return; | |
f24e9980 SW |
1586 | |
1587 | down_write(&osdc->map_sem); | |
1588 | ||
1589 | /* incremental maps */ | |
1590 | ceph_decode_32_safe(&p, end, nr_maps, bad); | |
1591 | dout(" %d inc maps\n", nr_maps); | |
1592 | while (nr_maps > 0) { | |
1593 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); | |
c89136ea SW |
1594 | epoch = ceph_decode_32(&p); |
1595 | maplen = ceph_decode_32(&p); | |
f24e9980 SW |
1596 | ceph_decode_need(&p, end, maplen, bad); |
1597 | next = p + maplen; | |
1598 | if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) { | |
1599 | dout("applying incremental map %u len %d\n", | |
1600 | epoch, maplen); | |
1601 | newmap = osdmap_apply_incremental(&p, next, | |
1602 | osdc->osdmap, | |
15d9882c | 1603 | &osdc->client->msgr); |
f24e9980 SW |
1604 | if (IS_ERR(newmap)) { |
1605 | err = PTR_ERR(newmap); | |
1606 | goto bad; | |
1607 | } | |
30dc6381 | 1608 | BUG_ON(!newmap); |
f24e9980 SW |
1609 | if (newmap != osdc->osdmap) { |
1610 | ceph_osdmap_destroy(osdc->osdmap); | |
1611 | osdc->osdmap = newmap; | |
1612 | } | |
38d6453c | 1613 | kick_requests(osdc, 0); |
f24e9980 SW |
1614 | } else { |
1615 | dout("ignoring incremental map %u len %d\n", | |
1616 | epoch, maplen); | |
1617 | } | |
1618 | p = next; | |
1619 | nr_maps--; | |
1620 | } | |
1621 | if (newmap) | |
1622 | goto done; | |
1623 | ||
1624 | /* full maps */ | |
1625 | ceph_decode_32_safe(&p, end, nr_maps, bad); | |
1626 | dout(" %d full maps\n", nr_maps); | |
1627 | while (nr_maps) { | |
1628 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); | |
c89136ea SW |
1629 | epoch = ceph_decode_32(&p); |
1630 | maplen = ceph_decode_32(&p); | |
f24e9980 SW |
1631 | ceph_decode_need(&p, end, maplen, bad); |
1632 | if (nr_maps > 1) { | |
1633 | dout("skipping non-latest full map %u len %d\n", | |
1634 | epoch, maplen); | |
1635 | } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) { | |
1636 | dout("skipping full map %u len %d, " | |
1637 | "older than our %u\n", epoch, maplen, | |
1638 | osdc->osdmap->epoch); | |
1639 | } else { | |
38d6453c SW |
1640 | int skipped_map = 0; |
1641 | ||
f24e9980 SW |
1642 | dout("taking full map %u len %d\n", epoch, maplen); |
1643 | newmap = osdmap_decode(&p, p+maplen); | |
1644 | if (IS_ERR(newmap)) { | |
1645 | err = PTR_ERR(newmap); | |
1646 | goto bad; | |
1647 | } | |
30dc6381 | 1648 | BUG_ON(!newmap); |
f24e9980 SW |
1649 | oldmap = osdc->osdmap; |
1650 | osdc->osdmap = newmap; | |
38d6453c SW |
1651 | if (oldmap) { |
1652 | if (oldmap->epoch + 1 < newmap->epoch) | |
1653 | skipped_map = 1; | |
f24e9980 | 1654 | ceph_osdmap_destroy(oldmap); |
38d6453c SW |
1655 | } |
1656 | kick_requests(osdc, skipped_map); | |
f24e9980 SW |
1657 | } |
1658 | p += maplen; | |
1659 | nr_maps--; | |
1660 | } | |
1661 | ||
1662 | done: | |
1663 | downgrade_write(&osdc->map_sem); | |
1664 | ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); | |
cd634fb6 SW |
1665 | |
1666 | /* | |
1667 | * subscribe to subsequent osdmap updates if full to ensure | |
1668 | * we find out when we are no longer full and stop returning | |
1669 | * ENOSPC. | |
1670 | */ | |
1671 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) | |
1672 | ceph_monc_request_next_osdmap(&osdc->client->monc); | |
1673 | ||
f9d25199 AE |
1674 | mutex_lock(&osdc->request_mutex); |
1675 | __send_queued(osdc); | |
1676 | mutex_unlock(&osdc->request_mutex); | |
f24e9980 | 1677 | up_read(&osdc->map_sem); |
03066f23 | 1678 | wake_up_all(&osdc->client->auth_wq); |
f24e9980 SW |
1679 | return; |
1680 | ||
1681 | bad: | |
1682 | pr_err("osdc handle_map corrupt msg\n"); | |
9ec7cab1 | 1683 | ceph_msg_dump(msg); |
f24e9980 SW |
1684 | up_write(&osdc->map_sem); |
1685 | return; | |
1686 | } | |
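/*
 * For reference, the OSDMAP message front decoded above is laid out
 * as: ceph_fsid, then a u32 count of incremental maps, each framed as
 * (u32 epoch, u32 len, len bytes of map data), then a u32 count of
 * full maps with the same framing.  Incrementals are applied only if
 * they extend our current epoch by exactly one; of the full maps only
 * the last (newest) is decoded.
 */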
1687 | ||
a40c4f10 YS |
1688 | /* |
1689 | * watch/notify callback event infrastructure | |
1690 | * | |
1691 | * These callbacks are used both for watch and notify operations. | |
1692 | */ | |
1693 | static void __release_event(struct kref *kref) | |
1694 | { | |
1695 | struct ceph_osd_event *event = | |
1696 | container_of(kref, struct ceph_osd_event, kref); | |
1697 | ||
1698 | dout("__release_event %p\n", event); | |
1699 | kfree(event); | |
1700 | } | |
1701 | ||
1702 | static void get_event(struct ceph_osd_event *event) | |
1703 | { | |
1704 | kref_get(&event->kref); | |
1705 | } | |
1706 | ||
1707 | void ceph_osdc_put_event(struct ceph_osd_event *event) | |
1708 | { | |
1709 | kref_put(&event->kref, __release_event); | |
1710 | } | |
1711 | EXPORT_SYMBOL(ceph_osdc_put_event); | |
1712 | ||
1713 | static void __insert_event(struct ceph_osd_client *osdc, | |
1714 | struct ceph_osd_event *new) | |
1715 | { | |
1716 | struct rb_node **p = &osdc->event_tree.rb_node; | |
1717 | struct rb_node *parent = NULL; | |
1718 | struct ceph_osd_event *event = NULL; | |
1719 | ||
1720 | while (*p) { | |
1721 | parent = *p; | |
1722 | event = rb_entry(parent, struct ceph_osd_event, node); | |
1723 | if (new->cookie < event->cookie) | |
1724 | p = &(*p)->rb_left; | |
1725 | else if (new->cookie > event->cookie) | |
1726 | p = &(*p)->rb_right; | |
1727 | else | |
1728 | BUG(); | |
1729 | } | |
1730 | ||
1731 | rb_link_node(&new->node, parent, p); | |
1732 | rb_insert_color(&new->node, &osdc->event_tree); | |
1733 | } | |
1734 | ||
1735 | static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc, | |
1736 | u64 cookie) | |
1737 | { | |
1738 | struct rb_node **p = &osdc->event_tree.rb_node; | |
1739 | struct rb_node *parent = NULL; | |
1740 | struct ceph_osd_event *event = NULL; | |
1741 | ||
1742 | while (*p) { | |
1743 | parent = *p; | |
1744 | event = rb_entry(parent, struct ceph_osd_event, node); | |
1745 | if (cookie < event->cookie) | |
1746 | p = &(*p)->rb_left; | |
1747 | else if (cookie > event->cookie) | |
1748 | p = &(*p)->rb_right; | |
1749 | else | |
1750 | return event; | |
1751 | } | |
1752 | return NULL; | |
1753 | } | |
1754 | ||
1755 | static void __remove_event(struct ceph_osd_event *event) | |
1756 | { | |
1757 | struct ceph_osd_client *osdc = event->osdc; | |
1758 | ||
1759 | if (!RB_EMPTY_NODE(&event->node)) { | |
1760 | dout("__remove_event removed %p\n", event); | |
1761 | rb_erase(&event->node, &osdc->event_tree); | |
1762 | ceph_osdc_put_event(event); | |
1763 | } else { | |
1764 | dout("__remove_event didn't remove %p\n", event); | |
1765 | } | |
1766 | } | |
1767 | ||
1768 | int ceph_osdc_create_event(struct ceph_osd_client *osdc, | |
1769 | void (*event_cb)(u64, u64, u8, void *), | |
3c663bbd | 1770 | void *data, struct ceph_osd_event **pevent) |
a40c4f10 YS |
1771 | { |
1772 | struct ceph_osd_event *event; | |
1773 | ||
1774 | event = kmalloc(sizeof(*event), GFP_NOIO); | |
1775 | if (!event) | |
1776 | return -ENOMEM; | |
1777 | ||
1778 | dout("create_event %p\n", event); | |
1779 | event->cb = event_cb; | |
3c663bbd | 1780 | event->one_shot = 0; |
a40c4f10 YS |
1781 | event->data = data; |
1782 | event->osdc = osdc; | |
1783 | INIT_LIST_HEAD(&event->osd_node); | |
3ee5234d | 1784 | RB_CLEAR_NODE(&event->node); |
a40c4f10 YS |
1785 | kref_init(&event->kref); /* one ref for us */ |
1786 | kref_get(&event->kref); /* one ref for the caller */ | |
a40c4f10 YS |
1787 | |
1788 | spin_lock(&osdc->event_lock); | |
1789 | event->cookie = ++osdc->event_count; | |
1790 | __insert_event(osdc, event); | |
1791 | spin_unlock(&osdc->event_lock); | |
1792 | ||
1793 | *pevent = event; | |
1794 | return 0; | |
1795 | } | |
1796 | EXPORT_SYMBOL(ceph_osdc_create_event); | |
1797 | ||
1798 | void ceph_osdc_cancel_event(struct ceph_osd_event *event) | |
1799 | { | |
1800 | struct ceph_osd_client *osdc = event->osdc; | |
1801 | ||
1802 | dout("cancel_event %p\n", event); | |
1803 | spin_lock(&osdc->event_lock); | |
1804 | __remove_event(event); | |
1805 | spin_unlock(&osdc->event_lock); | |
1806 | ceph_osdc_put_event(event); /* caller's */ | |
1807 | } | |
1808 | EXPORT_SYMBOL(ceph_osdc_cancel_event); | |
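/*
 * Example (sketch) of the event API above: a hypothetical caller
 * registers a callback, then uses the event cookie in a watch osd op.
 * "my_notify_cb", "my_data" and "my_register_watch_event" are
 * illustrative names only; a real user (rbd, for instance) would also
 * tear the event down with ceph_osdc_cancel_event() when the watch is
 * removed.
 */
static void my_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	/* runs from osdc->notify_wq, so sleeping is allowed here */
	dout("notify ver %llu notify_id %llu opcode %u\n",
	     ver, notify_id, opcode);
}

static int my_register_watch_event(struct ceph_osd_client *osdc,
				   void *my_data,
				   struct ceph_osd_event **pevent)
{
	int ret = ceph_osdc_create_event(osdc, my_notify_cb, my_data, pevent);

	if (ret)
		return ret;		/* only -ENOMEM today */
	/* (*pevent)->cookie now identifies this watcher to the OSD */
	return 0;
}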
1809 | ||
1810 | ||
1811 | static void do_event_work(struct work_struct *work) | |
1812 | { | |
1813 | struct ceph_osd_event_work *event_work = | |
1814 | container_of(work, struct ceph_osd_event_work, work); | |
1815 | struct ceph_osd_event *event = event_work->event; | |
1816 | u64 ver = event_work->ver; | |
1817 | u64 notify_id = event_work->notify_id; | |
1818 | u8 opcode = event_work->opcode; | |
1819 | ||
1820 | dout("do_event_work completing %p\n", event); | |
1821 | event->cb(ver, notify_id, opcode, event->data); | |
a40c4f10 YS |
1822 | dout("do_event_work completed %p\n", event); |
1823 | ceph_osdc_put_event(event); | |
1824 | kfree(event_work); | |
1825 | } | |
1826 | ||
1827 | ||
1828 | /* | |
1829 | * Process osd watch notifications | |
1830 | */ | |
3c663bbd AE |
1831 | static void handle_watch_notify(struct ceph_osd_client *osdc, |
1832 | struct ceph_msg *msg) | |
a40c4f10 YS |
1833 | { |
1834 | void *p, *end; | |
1835 | u8 proto_ver; | |
1836 | u64 cookie, ver, notify_id; | |
1837 | u8 opcode; | |
1838 | struct ceph_osd_event *event; | |
1839 | struct ceph_osd_event_work *event_work; | |
1840 | ||
1841 | p = msg->front.iov_base; | |
1842 | end = p + msg->front.iov_len; | |
1843 | ||
1844 | ceph_decode_8_safe(&p, end, proto_ver, bad); | |
1845 | ceph_decode_8_safe(&p, end, opcode, bad); | |
1846 | ceph_decode_64_safe(&p, end, cookie, bad); | |
1847 | ceph_decode_64_safe(&p, end, ver, bad); | |
1848 | ceph_decode_64_safe(&p, end, notify_id, bad); | |
1849 | ||
1850 | spin_lock(&osdc->event_lock); | |
1851 | event = __find_event(osdc, cookie); | |
1852 | if (event) { | |
3c663bbd | 1853 | BUG_ON(event->one_shot); |
a40c4f10 | 1854 | get_event(event); |
a40c4f10 YS |
1855 | } |
1856 | spin_unlock(&osdc->event_lock); | |
1857 | dout("handle_watch_notify cookie %lld ver %lld event %p\n", | |
1858 | cookie, ver, event); | |
1859 | if (event) { | |
1860 | event_work = kmalloc(sizeof(*event_work), GFP_NOIO); | |
a40c4f10 YS |
1861 | if (!event_work) { |
1862 | dout("ERROR: could not allocate event_work\n"); | |
1863 | goto done_err; | |
1864 | } | |
6b0ae409 | 1865 | INIT_WORK(&event_work->work, do_event_work); |
a40c4f10 YS |
1866 | event_work->event = event; |
1867 | event_work->ver = ver; | |
1868 | event_work->notify_id = notify_id; | |
1869 | event_work->opcode = opcode; | |
1870 | if (!queue_work(osdc->notify_wq, &event_work->work)) { | |
1871 | dout("WARNING: failed to queue notify event work\n"); | |
1872 | goto done_err; | |
1873 | } | |
1874 | } | |
1875 | ||
1876 | return; | |
1877 | ||
1878 | done_err: | |
a40c4f10 YS |
1879 | ceph_osdc_put_event(event); |
1880 | return; | |
1881 | ||
1882 | bad: | |
1883 | pr_err("osdc handle_watch_notify corrupt msg\n"); | |
1884 | return; | |
1885 | } | |
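/*
 * The watch-notify front decoded above is, in order: u8 proto_ver,
 * u8 opcode, u64 cookie, u64 ver, u64 notify_id.  The cookie selects
 * the registered event; delivery is bounced through osdc->notify_wq
 * so the user callback runs in process context.
 */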
1886 | ||
70636773 AE |
1887 | static void ceph_osdc_msg_data_set(struct ceph_msg *msg, |
1888 | struct ceph_osd_data *osd_data) | |
f24e9980 | 1889 | { |
0fff87ec | 1890 | if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { |
f1baeb2b | 1891 | BUG_ON(osd_data->length > (u64) SIZE_MAX); |
ebf18f47 | 1892 | if (osd_data->length) |
70636773 AE |
1893 | ceph_msg_data_set_pages(msg, osd_data->pages, |
1894 | osd_data->length, osd_data->alignment); | |
9a5e6d09 AE |
1895 | } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { |
1896 | BUG_ON(!osd_data->pagelist->length); | |
1897 | ceph_msg_data_set_pagelist(msg, osd_data->pagelist); | |
68b4476b | 1898 | #ifdef CONFIG_BLOCK |
0fff87ec | 1899 | } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { |
a1930804 | 1900 | ceph_msg_data_set_bio(msg, osd_data->bio, osd_data->bio_length); |
68b4476b | 1901 | #endif |
2ac2b7a6 | 1902 | } else { |
0fff87ec | 1903 | BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); |
2ac2b7a6 | 1904 | } |
70636773 AE |
1905 | } |
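/*
 * The switch above covers every ceph_osd_data type: PAGES attaches a
 * page vector (length plus first-page alignment), PAGELIST a
 * ceph_pagelist, BIO (only under CONFIG_BLOCK) a bio chain, and NONE
 * attaches nothing at all.
 */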
1906 | ||
1907 | /* | |
1908 | * Register request, send initial attempt. | |
1909 | */ | |
1910 | int ceph_osdc_start_request(struct ceph_osd_client *osdc, | |
1911 | struct ceph_osd_request *req, | |
1912 | bool nofail) | |
1913 | { | |
1914 | int rc = 0; | |
1915 | ||
1916 | /* Set up response incoming data and request outgoing data fields */ | |
1917 | ||
1918 | ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in); | |
1919 | ceph_osdc_msg_data_set(req->r_request, &req->r_data_out); | |
f24e9980 | 1920 | |
f24e9980 SW |
1921 | down_read(&osdc->map_sem); |
1922 | mutex_lock(&osdc->request_mutex); | |
dc4b870c | 1923 | __register_request(osdc, req); |
92451b49 AE |
1924 | WARN_ON(req->r_sent); |
1925 | rc = __map_request(osdc, req, 0); | |
1926 | if (rc < 0) { | |
1927 | if (nofail) { | |
1928 | dout("osdc_start_request failed map, " | |
1929 | " will retry %lld\n", req->r_tid); | |
1930 | rc = 0; | |
f24e9980 | 1931 | } |
92451b49 | 1932 | goto out_unlock; |
f24e9980 | 1933 | } |
92451b49 AE |
1934 | if (req->r_osd == NULL) { |
1935 | dout("send_request %p no up osds in pg\n", req); | |
1936 | ceph_monc_request_next_osdmap(&osdc->client->monc); | |
1937 | } else { | |
7e2766a1 | 1938 | __send_queued(osdc); |
92451b49 AE |
1939 | } |
1940 | rc = 0; | |
234af26f | 1941 | out_unlock: |
f24e9980 SW |
1942 | mutex_unlock(&osdc->request_mutex); |
1943 | up_read(&osdc->map_sem); | |
1944 | return rc; | |
1945 | } | |
3d14c5d2 | 1946 | EXPORT_SYMBOL(ceph_osdc_start_request); |
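/*
 * Note on nofail above: if the initial __map_request() fails and the
 * caller asked for nofail, the request is left registered and 0 is
 * returned; kick_requests() will map and send it once a usable osdmap
 * arrives.
 */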
f24e9980 SW |
1947 | |
1948 | /* | |
1949 | * wait for a request to complete | |
1950 | */ | |
1951 | int ceph_osdc_wait_request(struct ceph_osd_client *osdc, | |
1952 | struct ceph_osd_request *req) | |
1953 | { | |
1954 | int rc; | |
1955 | ||
1956 | rc = wait_for_completion_interruptible(&req->r_completion); | |
1957 | if (rc < 0) { | |
1958 | mutex_lock(&osdc->request_mutex); | |
1959 | __cancel_request(req); | |
529cfcc4 | 1960 | __unregister_request(osdc, req); |
f24e9980 | 1961 | mutex_unlock(&osdc->request_mutex); |
25845472 | 1962 | complete_request(req); |
529cfcc4 | 1963 | dout("wait_request tid %llu canceled/timed out\n", req->r_tid); |
f24e9980 SW |
1964 | return rc; |
1965 | } | |
1966 | ||
1967 | dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result); | |
1968 | return req->r_result; | |
1969 | } | |
3d14c5d2 | 1970 | EXPORT_SYMBOL(ceph_osdc_wait_request); |
f24e9980 SW |
1971 | |
1972 | /* | |
1973 | * sync - wait for all in-flight requests to flush. avoid starvation. | |
1974 | */ | |
1975 | void ceph_osdc_sync(struct ceph_osd_client *osdc) | |
1976 | { | |
1977 | struct ceph_osd_request *req; | |
1978 | u64 last_tid, next_tid = 0; | |
1979 | ||
1980 | mutex_lock(&osdc->request_mutex); | |
1981 | last_tid = osdc->last_tid; | |
1982 | while (1) { | |
1983 | req = __lookup_request_ge(osdc, next_tid); | |
1984 | if (!req) | |
1985 | break; | |
1986 | if (req->r_tid > last_tid) | |
1987 | break; | |
1988 | ||
1989 | next_tid = req->r_tid + 1; | |
1990 | if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) | |
1991 | continue; | |
1992 | ||
1993 | ceph_osdc_get_request(req); | |
1994 | mutex_unlock(&osdc->request_mutex); | |
1995 | dout("sync waiting on tid %llu (last is %llu)\n", | |
1996 | req->r_tid, last_tid); | |
1997 | wait_for_completion(&req->r_safe_completion); | |
1998 | mutex_lock(&osdc->request_mutex); | |
1999 | ceph_osdc_put_request(req); | |
2000 | } | |
2001 | mutex_unlock(&osdc->request_mutex); | |
2002 | dout("sync done (thru tid %llu)\n", last_tid); | |
2003 | } | |
3d14c5d2 | 2004 | EXPORT_SYMBOL(ceph_osdc_sync); |
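/*
 * Starvation note for ceph_osdc_sync() above: last_tid is sampled once
 * under request_mutex and the tree is re-walked from next_tid after
 * each wait, so writes submitted after the sync started (tid >
 * last_tid) are never waited on.
 */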
f24e9980 SW |
2005 | |
2006 | /* | |
2007 | * init, shutdown | |
2008 | */ | |
2009 | int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) | |
2010 | { | |
2011 | int err; | |
2012 | ||
2013 | dout("init\n"); | |
2014 | osdc->client = client; | |
2015 | osdc->osdmap = NULL; | |
2016 | init_rwsem(&osdc->map_sem); | |
2017 | init_completion(&osdc->map_waiters); | |
2018 | osdc->last_requested_map = 0; | |
2019 | mutex_init(&osdc->request_mutex); | |
f24e9980 SW |
2020 | osdc->last_tid = 0; |
2021 | osdc->osds = RB_ROOT; | |
f5a2041b | 2022 | INIT_LIST_HEAD(&osdc->osd_lru); |
f24e9980 | 2023 | osdc->requests = RB_ROOT; |
422d2cb8 | 2024 | INIT_LIST_HEAD(&osdc->req_lru); |
6f6c7006 SW |
2025 | INIT_LIST_HEAD(&osdc->req_unsent); |
2026 | INIT_LIST_HEAD(&osdc->req_notarget); | |
a40c4f10 | 2027 | INIT_LIST_HEAD(&osdc->req_linger); |
f24e9980 SW |
2028 | osdc->num_requests = 0; |
2029 | INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); | |
f5a2041b | 2030 | INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); |
a40c4f10 YS |
2031 | spin_lock_init(&osdc->event_lock); |
2032 | osdc->event_tree = RB_ROOT; | |
2033 | osdc->event_count = 0; | |
f5a2041b YS |
2034 | |
2035 | schedule_delayed_work(&osdc->osds_timeout_work, | |
3d14c5d2 | 2036 | round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); |
f24e9980 | 2037 | |
5f44f142 | 2038 | err = -ENOMEM; |
f24e9980 SW |
2039 | osdc->req_mempool = mempool_create_kmalloc_pool(10, |
2040 | sizeof(struct ceph_osd_request)); | |
2041 | if (!osdc->req_mempool) | |
5f44f142 | 2042 | goto out; |
f24e9980 | 2043 | |
d50b409f SW |
2044 | err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, |
2045 | OSD_OP_FRONT_LEN, 10, true, | |
4f48280e | 2046 | "osd_op"); |
f24e9980 | 2047 | if (err < 0) |
5f44f142 | 2048 | goto out_mempool; |
d50b409f | 2049 | err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, |
4f48280e SW |
2050 | OSD_OPREPLY_FRONT_LEN, 10, true, |
2051 | "osd_op_reply"); | |
c16e7869 SW |
2052 | if (err < 0) |
2053 | goto out_msgpool; | |
a40c4f10 YS |
2054 | |
2055 | osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); | |
2056 | if (!osdc->notify_wq) { | |
2057 | err = -ENOMEM; | |
2059 | goto out_msgpool; | |
2060 | } | |
f24e9980 | 2061 | return 0; |
5f44f142 | 2062 | |
c16e7869 SW |
2063 | out_msgpool: |
2064 | ceph_msgpool_destroy(&osdc->msgpool_op); | |
5f44f142 SW |
2065 | out_mempool: |
2066 | mempool_destroy(osdc->req_mempool); | |
2067 | out: | |
2068 | return err; | |
f24e9980 SW |
2069 | } |
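/*
 * Example (sketch) of the expected init/teardown pairing;
 * "my_osdc_lifecycle" is illustrative, "client" is assumed to be a
 * ceph_client that is already set up, and the error handling is
 * abbreviated.
 */
static int my_osdc_lifecycle(struct ceph_client *client,
			     struct ceph_osd_client *osdc)
{
	int err = ceph_osdc_init(osdc, client);

	if (err)
		return err;
	/* ... submit requests ... */
	ceph_osdc_sync(osdc);		/* flush in-flight writes */
	ceph_osdc_stop(osdc);
	return 0;
}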
2070 | ||
2071 | void ceph_osdc_stop(struct ceph_osd_client *osdc) | |
2072 | { | |
a40c4f10 YS |
2073 | flush_workqueue(osdc->notify_wq); |
2074 | destroy_workqueue(osdc->notify_wq); | |
f24e9980 | 2075 | cancel_delayed_work_sync(&osdc->timeout_work); |
f5a2041b | 2076 | cancel_delayed_work_sync(&osdc->osds_timeout_work); |
f24e9980 SW |
2077 | if (osdc->osdmap) { |
2078 | ceph_osdmap_destroy(osdc->osdmap); | |
2079 | osdc->osdmap = NULL; | |
2080 | } | |
aca420bc | 2081 | remove_all_osds(osdc); |
f24e9980 SW |
2082 | mempool_destroy(osdc->req_mempool); |
2083 | ceph_msgpool_destroy(&osdc->msgpool_op); | |
c16e7869 | 2084 | ceph_msgpool_destroy(&osdc->msgpool_op_reply); |
f24e9980 SW |
2085 | } |
2086 | ||
2087 | /* | |
2088 | * Read some contiguous pages. If we cross a stripe boundary, shorten | |
2089 | * *plen. Return number of bytes read, or error. | |
2090 | */ | |
2091 | int ceph_osdc_readpages(struct ceph_osd_client *osdc, | |
2092 | struct ceph_vino vino, struct ceph_file_layout *layout, | |
2093 | u64 off, u64 *plen, | |
2094 | u32 truncate_seq, u64 truncate_size, | |
b7495fc2 | 2095 | struct page **pages, int num_pages, int page_align) |
f24e9980 SW |
2096 | { |
2097 | struct ceph_osd_request *req; | |
acead002 | 2098 | struct ceph_osd_req_op op; |
f24e9980 SW |
2099 | int rc = 0; |
2100 | ||
2101 | dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, | |
2102 | vino.snap, off, *plen); | |
acead002 | 2103 | req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, &op, |
f24e9980 | 2104 | CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, |
acead002 | 2105 | NULL, truncate_seq, truncate_size, |
153e5167 | 2106 | false); |
6816282d SW |
2107 | if (IS_ERR(req)) |
2108 | return PTR_ERR(req); | |
f24e9980 SW |
2109 | |
2110 | /* it may be a short read due to an object boundary */ | |
0fff87ec | 2111 | |
43bfe5de AE |
2112 | ceph_osd_data_pages_init(&req->r_data_in, pages, *plen, page_align, |
2113 | false, false); | |
f24e9980 | 2114 | |
e0c59487 | 2115 | dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", |
43bfe5de | 2116 | off, *plen, *plen, page_align); |
f24e9980 | 2117 | |
02ee07d3 AE |
2118 | ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); |
2119 | ||
f24e9980 SW |
2120 | rc = ceph_osdc_start_request(osdc, req, false); |
2121 | if (!rc) | |
2122 | rc = ceph_osdc_wait_request(osdc, req); | |
2123 | ||
2124 | ceph_osdc_put_request(req); | |
2125 | dout("readpages result %d\n", rc); | |
2126 | return rc; | |
2127 | } | |
3d14c5d2 | 2128 | EXPORT_SYMBOL(ceph_osdc_readpages); |
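/*
 * Example (sketch): a synchronous extent read built on the helper
 * above.  "my_read_extent" is illustrative; truncate_seq/truncate_size
 * are passed as zero here, where a real filesystem caller would supply
 * the inode's truncation state.  calc_pages_for() and the page-vector
 * helpers come from libceph.
 */
static int my_read_extent(struct ceph_osd_client *osdc,
			  struct ceph_vino vino,
			  struct ceph_file_layout *layout,
			  u64 off, u64 len)
{
	int num_pages = calc_pages_for(off, len);
	int page_align = off & ~PAGE_MASK;
	struct page **pages;
	int rc;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len, 0, 0,
				 pages, num_pages, page_align);
	/* rc is the number of bytes read, or a negative error */
	ceph_release_page_vector(pages, num_pages);
	return rc;
}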
f24e9980 SW |
2129 | |
2130 | /* | |
2131 | * do a synchronous write on N pages | |
2132 | */ | |
2133 | int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |
2134 | struct ceph_file_layout *layout, | |
2135 | struct ceph_snap_context *snapc, | |
2136 | u64 off, u64 len, | |
2137 | u32 truncate_seq, u64 truncate_size, | |
2138 | struct timespec *mtime, | |
24808826 | 2139 | struct page **pages, int num_pages) |
f24e9980 SW |
2140 | { |
2141 | struct ceph_osd_request *req; | |
acead002 | 2142 | struct ceph_osd_req_op op; |
f24e9980 | 2143 | int rc = 0; |
b7495fc2 | 2144 | int page_align = off & ~PAGE_MASK; |
f24e9980 | 2145 | |
acead002 AE |
2146 | BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */ |
2147 | req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, &op, | |
f24e9980 | 2148 | CEPH_OSD_OP_WRITE, |
24808826 | 2149 | CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, |
acead002 | 2150 | snapc, truncate_seq, truncate_size, |
153e5167 | 2151 | true); |
6816282d SW |
2152 | if (IS_ERR(req)) |
2153 | return PTR_ERR(req); | |
f24e9980 SW |
2154 | |
2155 | /* it may be a short write due to an object boundary */ | |
43bfe5de AE |
2156 | ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align, |
2157 | false, false); | |
2158 | dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); | |
f24e9980 | 2159 | |
02ee07d3 AE |
2160 | ceph_osdc_build_request(req, off, 1, &op, snapc, CEPH_NOSNAP, mtime); |
2161 | ||
87f979d3 | 2162 | rc = ceph_osdc_start_request(osdc, req, true); |
f24e9980 SW |
2163 | if (!rc) |
2164 | rc = ceph_osdc_wait_request(osdc, req); | |
2165 | ||
2166 | ceph_osdc_put_request(req); | |
2167 | if (rc == 0) | |
2168 | rc = len; | |
2169 | dout("writepages result %d\n", rc); | |
2170 | return rc; | |
2171 | } | |
3d14c5d2 | 2172 | EXPORT_SYMBOL(ceph_osdc_writepages); |
f24e9980 SW |
2173 | |
2174 | /* | |
2175 | * handle incoming message | |
2176 | */ | |
2177 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
2178 | { | |
2179 | struct ceph_osd *osd = con->private; | |
32c895e7 | 2180 | struct ceph_osd_client *osdc; |
f24e9980 SW |
2181 | int type = le16_to_cpu(msg->hdr.type); |
2182 | ||
2183 | if (!osd) | |
4a32f93d | 2184 | goto out; |
32c895e7 | 2185 | osdc = osd->o_osdc; |
f24e9980 SW |
2186 | |
2187 | switch (type) { | |
2188 | case CEPH_MSG_OSD_MAP: | |
2189 | ceph_osdc_handle_map(osdc, msg); | |
2190 | break; | |
2191 | case CEPH_MSG_OSD_OPREPLY: | |
350b1c32 | 2192 | handle_reply(osdc, msg, con); |
f24e9980 | 2193 | break; |
a40c4f10 YS |
2194 | case CEPH_MSG_WATCH_NOTIFY: |
2195 | handle_watch_notify(osdc, msg); | |
2196 | break; | |
f24e9980 SW |
2197 | |
2198 | default: | |
2199 | pr_err("received unknown message type %d %s\n", type, | |
2200 | ceph_msg_type_name(type)); | |
2201 | } | |
4a32f93d | 2202 | out: |
f24e9980 SW |
2203 | ceph_msg_put(msg); |
2204 | } | |
2205 | ||
5b3a4db3 | 2206 | /* |
21b667f6 SW |
2207 | * lookup and return message for incoming reply. set up reply message |
2208 | * pages. | |
5b3a4db3 SW |
2209 | */ |
2210 | static struct ceph_msg *get_reply(struct ceph_connection *con, | |
2450418c YS |
2211 | struct ceph_msg_header *hdr, |
2212 | int *skip) | |
f24e9980 SW |
2213 | { |
2214 | struct ceph_osd *osd = con->private; | |
2215 | struct ceph_osd_client *osdc = osd->o_osdc; | |
2450418c | 2216 | struct ceph_msg *m; |
0547a9b3 | 2217 | struct ceph_osd_request *req; |
5b3a4db3 SW |
2218 | int front = le32_to_cpu(hdr->front_len); |
2219 | int data_len = le32_to_cpu(hdr->data_len); | |
0547a9b3 | 2220 | u64 tid; |
f24e9980 | 2221 | |
0547a9b3 YS |
2222 | tid = le64_to_cpu(hdr->tid); |
2223 | mutex_lock(&osdc->request_mutex); | |
2224 | req = __lookup_request(osdc, tid); | |
2225 | if (!req) { | |
2226 | *skip = 1; | |
2227 | m = NULL; | |
756a16a5 SW |
2228 | dout("get_reply unknown tid %llu from osd%d\n", tid, |
2229 | osd->o_osd); | |
0547a9b3 YS |
2230 | goto out; |
2231 | } | |
c16e7869 | 2232 | |
ace6d3a9 | 2233 | if (req->r_reply->con) |
8921d114 | 2234 | dout("%s revoking msg %p from old con %p\n", __func__, |
ace6d3a9 AE |
2235 | req->r_reply, req->r_reply->con); |
2236 | ceph_msg_revoke_incoming(req->r_reply); | |
0547a9b3 | 2237 | |
c16e7869 SW |
2238 | if (front > req->r_reply->front.iov_len) { |
2239 | pr_warning("get_reply front %d > preallocated %d\n", | |
2240 | front, (int)req->r_reply->front.iov_len); | |
b61c2763 | 2241 | m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false); |
a79832f2 | 2242 | if (!m) |
c16e7869 SW |
2243 | goto out; |
2244 | ceph_msg_put(req->r_reply); | |
2245 | req->r_reply = m; | |
2246 | } | |
2247 | m = ceph_msg_get(req->r_reply); | |
2248 | ||
0547a9b3 | 2249 | if (data_len > 0) { |
0fff87ec AE |
2250 | struct ceph_osd_data *osd_data = &req->r_data_in; |
2251 | ||
2252 | if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { | |
0fff87ec | 2253 | if (osd_data->pages && |
e0c59487 | 2254 | unlikely(osd_data->length < data_len)) { |
2ac2b7a6 | 2255 | |
e0c59487 AE |
2256 | pr_warning("tid %lld reply has %d bytes " |
2257 | "we had only %llu bytes ready\n", | |
2258 | tid, data_len, osd_data->length); | |
2ac2b7a6 AE |
2259 | *skip = 1; |
2260 | ceph_msg_put(m); | |
2261 | m = NULL; | |
2262 | goto out; | |
2263 | } | |
2ac2b7a6 | 2264 | } |
0547a9b3 | 2265 | } |
5b3a4db3 | 2266 | *skip = 0; |
c16e7869 | 2267 | dout("get_reply tid %lld %p\n", tid, m); |
0547a9b3 YS |
2268 | |
2269 | out: | |
2270 | mutex_unlock(&osdc->request_mutex); | |
2450418c | 2271 | return m; |
5b3a4db3 SW |
2272 | |
2273 | } | |
2274 | ||
2275 | static struct ceph_msg *alloc_msg(struct ceph_connection *con, | |
2276 | struct ceph_msg_header *hdr, | |
2277 | int *skip) | |
2278 | { | |
2279 | struct ceph_osd *osd = con->private; | |
2280 | int type = le16_to_cpu(hdr->type); | |
2281 | int front = le32_to_cpu(hdr->front_len); | |
2282 | ||
1c20f2d2 | 2283 | *skip = 0; |
5b3a4db3 SW |
2284 | switch (type) { |
2285 | case CEPH_MSG_OSD_MAP: | |
a40c4f10 | 2286 | case CEPH_MSG_WATCH_NOTIFY: |
b61c2763 | 2287 | return ceph_msg_new(type, front, GFP_NOFS, false); |
5b3a4db3 SW |
2288 | case CEPH_MSG_OSD_OPREPLY: |
2289 | return get_reply(con, hdr, skip); | |
2290 | default: | |
2291 | pr_info("alloc_msg unexpected msg type %d from osd%d\n", type, | |
2292 | osd->o_osd); | |
2293 | *skip = 1; | |
2294 | return NULL; | |
2295 | } | |
f24e9980 SW |
2296 | } |
2297 | ||
2298 | /* | |
2299 | * Wrappers to refcount the containing ceph_osd struct | |
2300 | */ | |
2301 | static struct ceph_connection *get_osd_con(struct ceph_connection *con) | |
2302 | { | |
2303 | struct ceph_osd *osd = con->private; | |
2304 | if (get_osd(osd)) | |
2305 | return con; | |
2306 | return NULL; | |
2307 | } | |
2308 | ||
2309 | static void put_osd_con(struct ceph_connection *con) | |
2310 | { | |
2311 | struct ceph_osd *osd = con->private; | |
2312 | put_osd(osd); | |
2313 | } | |
2314 | ||
4e7a5dcd SW |
2315 | /* |
2316 | * authentication | |
2317 | */ | |
a3530df3 AE |
2318 | /* |
2319 | * Note: returned pointer is the address of a structure that's | |
2320 | * managed separately. Caller must *not* attempt to free it. | |
2321 | */ | |
2322 | static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |
8f43fb53 | 2323 | int *proto, int force_new) |
4e7a5dcd SW |
2324 | { |
2325 | struct ceph_osd *o = con->private; | |
2326 | struct ceph_osd_client *osdc = o->o_osdc; | |
2327 | struct ceph_auth_client *ac = osdc->client->monc.auth; | |
74f1869f | 2328 | struct ceph_auth_handshake *auth = &o->o_auth; |
4e7a5dcd | 2329 | |
74f1869f | 2330 | if (force_new && auth->authorizer) { |
27859f97 | 2331 | ceph_auth_destroy_authorizer(ac, auth->authorizer); |
74f1869f AE |
2332 | auth->authorizer = NULL; |
2333 | } | |
27859f97 SW |
2334 | if (!auth->authorizer) { |
2335 | int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, | |
2336 | auth); | |
4e7a5dcd | 2337 | if (ret) |
a3530df3 | 2338 | return ERR_PTR(ret); |
27859f97 SW |
2339 | } else { |
2340 | int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, | |
0bed9b5c SW |
2341 | auth); |
2342 | if (ret) | |
2343 | return ERR_PTR(ret); | |
4e7a5dcd | 2344 | } |
4e7a5dcd | 2345 | *proto = ac->protocol; |
74f1869f | 2346 | |
a3530df3 | 2347 | return auth; |
4e7a5dcd SW |
2348 | } |
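/*
 * Authorizer lifecycle per the handler above: the per-osd
 * ceph_auth_handshake is created once, refreshed in place with
 * ceph_auth_update_authorizer() on later connection attempts, and
 * destroyed and rebuilt only when the messenger asks for a fresh one
 * (force_new, e.g. after invalidate_authorizer() below has run).
 */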
2349 | ||
2350 | ||
2351 | static int verify_authorizer_reply(struct ceph_connection *con, int len) | |
2352 | { | |
2353 | struct ceph_osd *o = con->private; | |
2354 | struct ceph_osd_client *osdc = o->o_osdc; | |
2355 | struct ceph_auth_client *ac = osdc->client->monc.auth; | |
2356 | ||
27859f97 | 2357 | return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len); |
4e7a5dcd SW |
2358 | } |
2359 | ||
9bd2e6f8 SW |
2360 | static int invalidate_authorizer(struct ceph_connection *con) |
2361 | { | |
2362 | struct ceph_osd *o = con->private; | |
2363 | struct ceph_osd_client *osdc = o->o_osdc; | |
2364 | struct ceph_auth_client *ac = osdc->client->monc.auth; | |
2365 | ||
27859f97 | 2366 | ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); |
9bd2e6f8 SW |
2367 | return ceph_monc_validate_auth(&osdc->client->monc); |
2368 | } | |
4e7a5dcd | 2369 | |
9e32789f | 2370 | static const struct ceph_connection_operations osd_con_ops = { |
f24e9980 SW |
2371 | .get = get_osd_con, |
2372 | .put = put_osd_con, | |
2373 | .dispatch = dispatch, | |
4e7a5dcd SW |
2374 | .get_authorizer = get_authorizer, |
2375 | .verify_authorizer_reply = verify_authorizer_reply, | |
9bd2e6f8 | 2376 | .invalidate_authorizer = invalidate_authorizer, |
f24e9980 | 2377 | .alloc_msg = alloc_msg, |
81b024e7 | 2378 | .fault = osd_reset, |
f24e9980 | 2379 | }; |