ceph: let osd client clean up for interrupted request
[linux-2.6-block.git] / net / ceph / osd_client.c

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
		       u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}

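/*
 * Example for calc_layout() above: with a default layout (4 MB objects,
 * striping disabled), a file extent off=6 MB, *plen=4 MB maps to
 * objnum=1, objoff=2 MB, objlen=2 MB; the extent is cut at the object
 * boundary and *plen is trimmed from 4 MB down to 2 MB.
 */
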
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which, bool write_request)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].extent.osd_data;
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

struct ceph_osd_data *
osd_req_op_cls_request_info(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].cls.request_info;
}
EXPORT_SYMBOL(osd_req_op_cls_request_info);	/* ??? */

struct ceph_osd_data *
osd_req_op_cls_request_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].cls.request_data;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data);	/* ??? */

struct ceph_osd_data *
osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].cls.response_data;
}
EXPORT_SYMBOL(osd_req_op_cls_response_data);	/* ??? */

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, bool write_request,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, bool write_request,
			struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, bool write_request,
			struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_cls_request_info(osd_req, which);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_cls_request_data(osd_req, which);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_cls_response_data(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

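/*
 * Number of bytes carried by an osd_data item, whatever its type;
 * NONE (and any unrecognized type) counts as zero.
 */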
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}


static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}

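/*
 * Release the data buffers attached to a single op: the extent data
 * for reads and writes, or the three class-call data items.
 */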
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	default:
		break;
	}
}

/*
 * requests
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req;
	unsigned int which;

	req = container_of(kref, struct ceph_osd_request, r_kref);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		ceph_msg_revoke_incoming(req->r_reply);
		ceph_msg_put(req->r_reply);
	}

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	ceph_put_snap_context(req->r_snapc);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);

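/*
 * Rough sketch of how a caller typically uses the request allocator
 * (error handling omitted; most callers go through
 * ceph_osdc_new_request() below instead):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	...set up the object id and data buffers, submit, wait...
 *	ceph_osdc_put_request(req);
 */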
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	size_t msg_size;

	BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
	BUG_ON(num_ops > CEPH_OSD_MAX_OP);

	msg_size = 4 + 4 + 8 + 8 + 4 + 8;
	msg_size += 2 + 4 + 8 + 4 + 4;	/* oloc */
	msg_size += 1 + 8 + 4 + 4;	/* pg_t */
	msg_size += 4 + MAX_OBJ_NAME_SIZE;
	msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
	msg_size += 8;	/* snapid */
	msg_size += 8;	/* snap_seq */
	msg_size += 8 * (snapc ? snapc->num_snaps : 0);	/* snaps */
	msg_size += 4;

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	/* create request message; allow space for oid */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_STAT:
	case CEPH_OSD_OP_MAPEXT:
	case CEPH_OSD_OP_MASKTRUNC:
	case CEPH_OSD_OP_SPARSE_READ:
	case CEPH_OSD_OP_NOTIFY:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_ASSERT_VER:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_APPEND:
	case CEPH_OSD_OP_STARTSYNC:
	case CEPH_OSD_OP_SETTRUNC:
	case CEPH_OSD_OP_TRIMTRUNC:
	case CEPH_OSD_OP_TMAPUP:
	case CEPH_OSD_OP_TMAPPUT:
	case CEPH_OSD_OP_TMAPGET:
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_ROLLBACK:
	case CEPH_OSD_OP_WATCH:
	case CEPH_OSD_OP_OMAPGETKEYS:
	case CEPH_OSD_OP_OMAPGETVALS:
	case CEPH_OSD_OP_OMAPGETHEADER:
	case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
	case CEPH_OSD_OP_OMAPSETVALS:
	case CEPH_OSD_OP_OMAPSETHEADER:
	case CEPH_OSD_OP_OMAPCLEAR:
	case CEPH_OSD_OP_OMAPRMKEYS:
	case CEPH_OSD_OP_OMAP_CMP:
	case CEPH_OSD_OP_CLONERANGE:
	case CEPH_OSD_OP_ASSERT_SRC_VERSION:
	case CEPH_OSD_OP_SRC_CMPXATTR:
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_GETXATTRS:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_SETXATTRS:
	case CEPH_OSD_OP_RESETXATTRS:
	case CEPH_OSD_OP_RMXATTR:
	case CEPH_OSD_OP_PULL:
	case CEPH_OSD_OP_PUSH:
	case CEPH_OSD_OP_BALANCEREADS:
	case CEPH_OSD_OP_UNBALANCEREADS:
	case CEPH_OSD_OP_SCRUB:
	case CEPH_OSD_OP_SCRUB_RESERVE:
	case CEPH_OSD_OP_SCRUB_UNRESERVE:
	case CEPH_OSD_OP_SCRUB_STOP:
	case CEPH_OSD_OP_SCRUB_MAP:
	case CEPH_OSD_OP_WRLOCK:
	case CEPH_OSD_OP_WRUNLOCK:
	case CEPH_OSD_OP_RDLOCK:
	case CEPH_OSD_OP_RDUNLOCK:
	case CEPH_OSD_OP_UPLOCK:
	case CEPH_OSD_OP_DNLOCK:
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_PGLS:
	case CEPH_OSD_OP_PGLS_FILTER:
		return true;
	default:
		return false;
	}
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		u16 opcode)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;

	return op;
}

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE)
		payload_len += length;

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

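/*
 * Shrink a previously initialized read/write op, e.g. when less data
 * will be transferred than originally set up; the extent length may
 * only stay the same or decrease.
 */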
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	op->payload_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->cls.argc = 0;	/* currently unused */

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
			   unsigned int which, u16 opcode,
			   u64 cookie, u64 version, int flag)
{
	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);

	BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

	op->watch.cookie = cookie;
	/* op->watch.ver = version; */	/* XXX 3847 */
	op->watch.ver = cpu_to_le64(version);
	if (opcode == CEPH_OSD_OP_WATCH && flag)
		op->watch.flag = (u8)1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

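/*
 * Convert op 'which' of the request from its in-memory form to the
 * little-endian wire format in *dst, attach its data to the request
 * or reply message as appropriate, and return the number of bytes of
 * outbound (request) data it carries.
 */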
static u64 osd_req_encode_op(struct ceph_osd_request *req,
			     struct ceph_osd_op *dst, unsigned int which)
{
	struct ceph_osd_req_op *src;
	struct ceph_osd_data *osd_data;
	u64 request_data_len = 0;
	u64 data_length;

	BUG_ON(which >= req->r_num_ops);
	src = &req->r_ops[which];
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		if (src->op == CEPH_OSD_OP_WRITE)
			request_data_len = src->extent.length;
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		osd_data = &src->extent.osd_data;
		if (src->op == CEPH_OSD_OP_WRITE)
			ceph_osdc_msg_data_add(req->r_request, osd_data);
		else
			ceph_osdc_msg_data_add(req->r_reply, osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		osd_data = &src->cls.request_info;
		ceph_osdc_msg_data_add(req->r_request, osd_data);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST);
		request_data_len = osd_data->pagelist->length;

		osd_data = &src->cls.request_data;
		data_length = ceph_osd_data_length(osd_data);
		if (data_length) {
			BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
			dst->cls.indata_len = cpu_to_le32(data_length);
			ceph_osdc_msg_data_add(req->r_request, osd_data);
			src->payload_len += data_length;
			request_data_len += data_length;
		}
		osd_data = &src->cls.response_data;
		ceph_osdc_msg_data_add(req->r_reply, osd_data);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}
	dst->op = cpu_to_le16(src->op);
	dst->payload_len = cpu_to_le32(src->payload_len);

	return request_data_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if num_ops > 1, a second 'startsync' op is included so that the osd
 * will flush data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	u32 object_size;
	u64 object_base;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	object_size = le32_to_cpu(layout->fl_object_size);
	object_base = off - objoff;
	if (truncate_size <= object_base) {
		truncate_size = 0;
	} else {
		truncate_size -= object_base;
		if (truncate_size > object_size)
			truncate_size = object_size;
	}

	osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
				truncate_size, truncate_seq);

	/*
	 * A second op in the ops array means the caller wants to
	 * also include a 'startsync' command so that the
	 * osd will flush data quickly.
	 */
	if (num_ops > 1)
		osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);

	req->r_file_layout = *layout;	/* keep a copy */

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx",
		 vino.ino, objnum);
	req->r_oid_len = strlen(req->r_oid);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

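/*
 * Return the registered request with the lowest tid greater than or
 * equal to the given tid, or NULL if there is none.
 */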
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	LIST_HEAD(resend);
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;
	/*
	 * Build up a list of requests to resend by traversing the
	 * osd's list of requests.  Requests for a given object are
	 * sent in tid order, and that is also the order they're
	 * kept on this list.  Therefore all requests that are in
	 * flight will be found first, followed by all requests that
	 * have not yet been sent.  And to resend requests while
	 * preserving this order we will want to put any sent
	 * requests back on the front of the osd client's unsent
	 * list.
	 *
	 * So we build a separate ordered list of already-sent
	 * requests for the affected osd and splice it onto the
	 * front of the osd client's unsent list.  Once we've seen a
	 * request that has not yet been sent we're done.  Those
	 * requests are already sitting right where they belong.
	 */
	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		if (!req->r_sent)
			break;
		list_move_tail(&req->r_req_lru_item, &resend);
		dout("requeueing %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}
	list_splice(&resend, &osdc->req_unsent);

	/*
	 * Linger requests are re-registered before sending, which
	 * sets up a new tid for each.  We add them to the unsent
	 * list at the end to keep things in tid order.
	 */
	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add_tail(&req->r_req_lru_item, &osdc->req_unsent);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

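/*
 * osds with no requests outstanding are parked on an lru; their
 * sessions are closed by remove_old_osds() once they have been idle
 * for longer than osd_idle_ttl.
 */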
static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
			req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}

static void __register_linger_request(struct ceph_osd_client *osdc,
				    struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

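/*
 * Mark a request as lingering: it stays registered and is resent
 * across osdmap changes and connection resets until explicitly
 * unregistered (used, e.g., to keep watch registrations alive).
 */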
void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
				ceph_file_layout_pg_pool(req->r_file_layout));
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

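/*
 * Periodic work: close out osd sessions that have been idle past
 * their ttl, then re-arm the work for another osd_idle_ttl/4.
 */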
static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

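/*
 * A write is "safe" once the osd has committed it to stable storage;
 * run the safe callback, if any, and wake anyone (e.g. an fsync
 * waiter) blocked on r_safe_completion.
 */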
static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	void *p, *end;
	struct ceph_osd_request *req;
	u64 tid;
	int object_len;
	unsigned int numops;
	int payload_len, flags;
	s32 result;
	s32 retry_attempt;
	struct ceph_pg pg;
	int err;
	u32 reassert_epoch;
	u64 reassert_version;
	u32 osdmap_epoch;
	int already_completed;
	u32 bytes;
	unsigned int i;

	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_reply %p tid %llu\n", msg, tid);

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 4, bad);
	object_len = ceph_decode_32(&p);
	ceph_decode_need(&p, end, object_len, bad);
	p += object_len;

	err = ceph_decode_pgid(&p, end, &pg);
	if (err)
		goto bad;

	ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
	flags = ceph_decode_64(&p);
	result = ceph_decode_32(&p);
	reassert_epoch = ceph_decode_32(&p);
	reassert_version = ceph_decode_64(&p);
	osdmap_epoch = ceph_decode_32(&p);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		goto bad_mutex;
	}
	ceph_osdc_get_request(req);

	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
	     req, result);

	ceph_decode_need(&p, end, 4, bad);
	numops = ceph_decode_32(&p);
	if (numops > CEPH_OSD_MAX_OP)
		goto bad_put;
	if (numops != req->r_num_ops)
		goto bad_put;
	payload_len = 0;
	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
	for (i = 0; i < numops; i++) {
		struct ceph_osd_op *op = p;
		int len;

		len = le32_to_cpu(op->payload_len);
		req->r_reply_op_len[i] = len;
		dout(" op %d has %d bytes\n", i, len);
		payload_len += len;
		p += sizeof(*op);
	}
	bytes = le32_to_cpu(msg->hdr.data_len);
	if (payload_len != bytes) {
		pr_warning("sum of op payload lens %d != data_len %d\n",
			   payload_len, bytes);
		goto bad_put;
	}

	ceph_decode_need(&p, end, 4 + numops * 4, bad);
	retry_attempt = ceph_decode_32(&p);
	for (i = 0; i < numops; i++)
		req->r_reply_op_result[i] = ceph_decode_32(&p);

	if (!req->r_got_reply) {

		req->r_result = result;
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
		req->r_reassert_version.version = cpu_to_le64(reassert_version);

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	already_completed = req->r_completed;
	req->r_completed = 1;
	mutex_unlock(&osdc->request_mutex);
	if (already_completed)
		goto done;

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad_put:
	ceph_osdc_put_request(req);
bad_mutex:
	mutex_unlock(&osdc->request_mutex);
bad:
	pr_err("corrupt osd_op_reply got %d %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
	ceph_msg_dump(msg);
}

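/*
 * Reset (close and reopen, or tear down) the connection to any osd
 * whose address changed in the new osdmap or that is no longer up.
 */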
6f6c7006 1558static void reset_changed_osds(struct ceph_osd_client *osdc)
f24e9980 1559{
f24e9980 1560 struct rb_node *p, *n;
f24e9980 1561
6f6c7006
SW
1562 for (p = rb_first(&osdc->osds); p; p = n) {
1563 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
f24e9980 1564
6f6c7006
SW
1565 n = rb_next(p);
1566 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
1567 memcmp(&osd->o_con.peer_addr,
1568 ceph_osd_addr(osdc->osdmap,
1569 osd->o_osd),
1570 sizeof(struct ceph_entity_addr)) != 0)
1571 __reset_osd(osdc, osd);
f24e9980 1572 }
422d2cb8
YS
1573}
1574
1575/*
6f6c7006
SW
1576 * Requeue requests whose mapping to an OSD has changed. If requests map to
1577 * no osd, request a new map.
422d2cb8 1578 *
e6d50f67 1579 * Caller should hold map_sem for read.
422d2cb8 1580 */
38d6453c 1581static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
422d2cb8 1582{
a40c4f10 1583 struct ceph_osd_request *req, *nreq;
6f6c7006
SW
1584 struct rb_node *p;
1585 int needmap = 0;
1586 int err;
422d2cb8 1587
38d6453c 1588 dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
422d2cb8 1589 mutex_lock(&osdc->request_mutex);
6194ea89 1590 for (p = rb_first(&osdc->requests); p; ) {
6f6c7006 1591 req = rb_entry(p, struct ceph_osd_request, r_node);
6194ea89 1592 p = rb_next(p);
ab60b16d
AE
1593
1594 /*
1595 * For linger requests that have not yet been
1596 * registered, move them to the linger list; they'll
1597 * be sent to the osd in the loop below. Unregister
1598 * the request before re-registering it as a linger
1599 * request to ensure the __map_request() below
1600 * will decide it needs to be sent.
1601 */
1602 if (req->r_linger && list_empty(&req->r_linger_item)) {
1603 dout("%p tid %llu restart on osd%d\n",
1604 req, req->r_tid,
1605 req->r_osd ? req->r_osd->o_osd : -1);
1606 __unregister_request(osdc, req);
1607 __register_linger_request(osdc, req);
1608 continue;
1609 }
1610
38d6453c 1611 err = __map_request(osdc, req, force_resend);
6f6c7006
SW
1612 if (err < 0)
1613 continue; /* error */
1614 if (req->r_osd == NULL) {
1615 dout("%p tid %llu maps to no osd\n", req, req->r_tid);
1616 needmap++; /* request a newer map */
1617 } else if (err > 0) {
6194ea89
SW
1618 if (!req->r_linger) {
1619 dout("%p tid %llu requeued on osd%d\n", req,
1620 req->r_tid,
1621 req->r_osd ? req->r_osd->o_osd : -1);
a40c4f10 1622 req->r_flags |= CEPH_OSD_FLAG_RETRY;
6194ea89
SW
1623 }
1624 }
a40c4f10
YS
1625 }
1626
1627 list_for_each_entry_safe(req, nreq, &osdc->req_linger,
1628 r_linger_item) {
1629 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1630
38d6453c 1631 err = __map_request(osdc, req, force_resend);
ab60b16d 1632 dout("__map_request returned %d\n", err);
a40c4f10
YS
1633 if (err == 0)
1634 continue; /* no change and no osd was specified */
1635 if (err < 0)
1636 continue; /* hrm! */
1637 if (req->r_osd == NULL) {
1638 dout("tid %llu maps to no valid osd\n", req->r_tid);
1639 needmap++; /* request a newer map */
1640 continue;
6f6c7006 1641 }
a40c4f10
YS
1642
1643 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
1644 req->r_osd ? req->r_osd->o_osd : -1);
a40c4f10 1645 __register_request(osdc, req);
c89ce05e 1646 __unregister_linger_request(osdc, req);
6f6c7006 1647 }
f24e9980
SW
1648 mutex_unlock(&osdc->request_mutex);
1649
1650 if (needmap) {
1651 dout("%d requests for down osds, need new map\n", needmap);
1652 ceph_monc_request_next_osdmap(&osdc->client->monc);
1653 }
e6d50f67 1654 reset_changed_osds(osdc);
422d2cb8 1655}
6f6c7006
SW
1656
1657
f24e9980
SW
1658/*
1659 * Process updated osd map.
1660 *
1661 * The message contains any number of incremental and full maps, normally
1662 * indicating some sort of topology change in the cluster. Kick requests
1663 * off to different OSDs as needed.
1664 */
1665void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1666{
1667 void *p, *end, *next;
1668 u32 nr_maps, maplen;
1669 u32 epoch;
1670 struct ceph_osdmap *newmap = NULL, *oldmap;
1671 int err;
1672 struct ceph_fsid fsid;
1673
1674 dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
1675 p = msg->front.iov_base;
1676 end = p + msg->front.iov_len;
1677
1678 /* verify fsid */
1679 ceph_decode_need(&p, end, sizeof(fsid), bad);
1680 ceph_decode_copy(&p, &fsid, sizeof(fsid));
0743304d
SW
1681 if (ceph_check_fsid(osdc->client, &fsid) < 0)
1682 return;
f24e9980
SW
1683
1684 down_write(&osdc->map_sem);
1685
1686 /* incremental maps */
1687 ceph_decode_32_safe(&p, end, nr_maps, bad);
1688 dout(" %d inc maps\n", nr_maps);
1689 while (nr_maps > 0) {
1690 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
c89136ea
SW
1691 epoch = ceph_decode_32(&p);
1692 maplen = ceph_decode_32(&p);
f24e9980
SW
1693 ceph_decode_need(&p, end, maplen, bad);
1694 next = p + maplen;
1695 if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
1696 dout("applying incremental map %u len %d\n",
1697 epoch, maplen);
1698 newmap = osdmap_apply_incremental(&p, next,
1699 osdc->osdmap,
15d9882c 1700 &osdc->client->msgr);
f24e9980
SW
1701 if (IS_ERR(newmap)) {
1702 err = PTR_ERR(newmap);
1703 goto bad;
1704 }
30dc6381 1705 BUG_ON(!newmap);
f24e9980
SW
1706 if (newmap != osdc->osdmap) {
1707 ceph_osdmap_destroy(osdc->osdmap);
1708 osdc->osdmap = newmap;
1709 }
38d6453c 1710 kick_requests(osdc, 0);
f24e9980
SW
1711 } else {
1712 dout("ignoring incremental map %u len %d\n",
1713 epoch, maplen);
1714 }
1715 p = next;
1716 nr_maps--;
1717 }
1718 if (newmap)
1719 goto done;
1720
1721 /* full maps */
1722 ceph_decode_32_safe(&p, end, nr_maps, bad);
1723 dout(" %d full maps\n", nr_maps);
1724 while (nr_maps) {
1725 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
c89136ea
SW
1726 epoch = ceph_decode_32(&p);
1727 maplen = ceph_decode_32(&p);
f24e9980
SW
1728 ceph_decode_need(&p, end, maplen, bad);
1729 if (nr_maps > 1) {
1730 dout("skipping non-latest full map %u len %d\n",
1731 epoch, maplen);
1732 } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
1733 dout("skipping full map %u len %d, "
1734 "older than our %u\n", epoch, maplen,
1735 osdc->osdmap->epoch);
1736 } else {
38d6453c
SW
1737 int skipped_map = 0;
1738
f24e9980
SW
1739 dout("taking full map %u len %d\n", epoch, maplen);
1740 newmap = osdmap_decode(&p, p+maplen);
1741 if (IS_ERR(newmap)) {
1742 err = PTR_ERR(newmap);
1743 goto bad;
1744 }
30dc6381 1745 BUG_ON(!newmap);
f24e9980
SW
1746 oldmap = osdc->osdmap;
1747 osdc->osdmap = newmap;
38d6453c
SW
1748 if (oldmap) {
1749 if (oldmap->epoch + 1 < newmap->epoch)
1750 skipped_map = 1;
f24e9980 1751 ceph_osdmap_destroy(oldmap);
38d6453c
SW
1752 }
1753 kick_requests(osdc, skipped_map);
f24e9980
SW
1754 }
1755 p += maplen;
1756 nr_maps--;
1757 }
1758
1759done:
1760 downgrade_write(&osdc->map_sem);
1761 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
cd634fb6
SW
1762
	/*
	 * Subscribe to subsequent osdmap updates if the cluster is full,
	 * so we find out when it is no longer full and can stop
	 * returning ENOSPC.
	 */
1768 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
1769 ceph_monc_request_next_osdmap(&osdc->client->monc);
1770
f9d25199
AE
1771 mutex_lock(&osdc->request_mutex);
1772 __send_queued(osdc);
1773 mutex_unlock(&osdc->request_mutex);
f24e9980 1774 up_read(&osdc->map_sem);
03066f23 1775 wake_up_all(&osdc->client->auth_wq);
f24e9980
SW
1776 return;
1777
1778bad:
1779 pr_err("osdc handle_map corrupt msg\n");
9ec7cab1 1780 ceph_msg_dump(msg);
f24e9980
SW
1781 up_write(&osdc->map_sem);
1782 return;
1783}
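/*
 * Illustrative sketch, not part of the original file: the front-section
 * framing ceph_osdc_handle_map() consumes above, written as a bare
 * walker over one map section (incremental or full).  It uses only the
 * decode helpers this file already includes.
 */
static int example_walk_map_section(void **pp, void *end)
{
	void *p = *pp;
	u32 nr_maps, epoch, maplen;

	ceph_decode_32_safe(&p, end, nr_maps, bad);
	while (nr_maps--) {
		ceph_decode_need(&p, end, 2 * sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		dout("map epoch %u, %u bytes\n", epoch, maplen);
		p += maplen;	/* skip the encoded map itself */
	}
	*pp = p;
	return 0;
bad:
	return -ERANGE;
}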
1784
a40c4f10
YS
1785/*
1786 * watch/notify callback event infrastructure
1787 *
1788 * These callbacks are used both for watch and notify operations.
1789 */
1790static void __release_event(struct kref *kref)
1791{
1792 struct ceph_osd_event *event =
1793 container_of(kref, struct ceph_osd_event, kref);
1794
1795 dout("__release_event %p\n", event);
1796 kfree(event);
1797}
1798
1799static void get_event(struct ceph_osd_event *event)
1800{
1801 kref_get(&event->kref);
1802}
1803
1804void ceph_osdc_put_event(struct ceph_osd_event *event)
1805{
1806 kref_put(&event->kref, __release_event);
1807}
1808EXPORT_SYMBOL(ceph_osdc_put_event);
1809
1810static void __insert_event(struct ceph_osd_client *osdc,
1811 struct ceph_osd_event *new)
1812{
1813 struct rb_node **p = &osdc->event_tree.rb_node;
1814 struct rb_node *parent = NULL;
1815 struct ceph_osd_event *event = NULL;
1816
1817 while (*p) {
1818 parent = *p;
1819 event = rb_entry(parent, struct ceph_osd_event, node);
1820 if (new->cookie < event->cookie)
1821 p = &(*p)->rb_left;
1822 else if (new->cookie > event->cookie)
1823 p = &(*p)->rb_right;
1824 else
1825 BUG();
1826 }
1827
1828 rb_link_node(&new->node, parent, p);
1829 rb_insert_color(&new->node, &osdc->event_tree);
1830}
1831
1832static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
1833 u64 cookie)
1834{
1835 struct rb_node **p = &osdc->event_tree.rb_node;
1836 struct rb_node *parent = NULL;
1837 struct ceph_osd_event *event = NULL;
1838
1839 while (*p) {
1840 parent = *p;
1841 event = rb_entry(parent, struct ceph_osd_event, node);
1842 if (cookie < event->cookie)
1843 p = &(*p)->rb_left;
1844 else if (cookie > event->cookie)
1845 p = &(*p)->rb_right;
1846 else
1847 return event;
1848 }
1849 return NULL;
1850}
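/*
 * Illustrative sketch, not part of the original file: looking up an
 * event by cookie and taking a reference, mirroring what
 * handle_watch_notify() does further down.
 */
static struct ceph_osd_event *example_event_get(struct ceph_osd_client *osdc,
						u64 cookie)
{
	struct ceph_osd_event *event;

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event)
		get_event(event);	/* caller must ceph_osdc_put_event() */
	spin_unlock(&osdc->event_lock);
	return event;
}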
1851
1852static void __remove_event(struct ceph_osd_event *event)
1853{
1854 struct ceph_osd_client *osdc = event->osdc;
1855
1856 if (!RB_EMPTY_NODE(&event->node)) {
1857 dout("__remove_event removed %p\n", event);
1858 rb_erase(&event->node, &osdc->event_tree);
1859 ceph_osdc_put_event(event);
1860 } else {
1861 dout("__remove_event didn't remove %p\n", event);
1862 }
1863}
1864
1865int ceph_osdc_create_event(struct ceph_osd_client *osdc,
1866 void (*event_cb)(u64, u64, u8, void *),
3c663bbd 1867 void *data, struct ceph_osd_event **pevent)
a40c4f10
YS
1868{
1869 struct ceph_osd_event *event;
1870
1871 event = kmalloc(sizeof(*event), GFP_NOIO);
1872 if (!event)
1873 return -ENOMEM;
1874
1875 dout("create_event %p\n", event);
1876 event->cb = event_cb;
3c663bbd 1877 event->one_shot = 0;
a40c4f10
YS
1878 event->data = data;
1879 event->osdc = osdc;
1880 INIT_LIST_HEAD(&event->osd_node);
3ee5234d 1881 RB_CLEAR_NODE(&event->node);
a40c4f10
YS
1882 kref_init(&event->kref); /* one ref for us */
1883 kref_get(&event->kref); /* one ref for the caller */
a40c4f10
YS
1884
1885 spin_lock(&osdc->event_lock);
1886 event->cookie = ++osdc->event_count;
1887 __insert_event(osdc, event);
1888 spin_unlock(&osdc->event_lock);
1889
1890 *pevent = event;
1891 return 0;
1892}
1893EXPORT_SYMBOL(ceph_osdc_create_event);
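/*
 * Illustrative usage sketch, not part of the original file.  The
 * callback signature matches the event_cb parameter above;
 * my_notify_cb and my_data are hypothetical names.
 */
static void my_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	dout("notify ver %llu notify_id %llu opcode %u data %p\n",
	     ver, notify_id, opcode, data);
}

static int example_watch_setup(struct ceph_osd_client *osdc, void *my_data)
{
	struct ceph_osd_event *event;
	int ret;

	ret = ceph_osdc_create_event(osdc, my_notify_cb, my_data, &event);
	if (ret)
		return ret;

	/*
	 * event->cookie would normally be embedded in a CEPH_OSD_OP_WATCH
	 * request so that handle_watch_notify() below can route
	 * notifications back to this event.
	 */
	dout("watch cookie %llu\n", event->cookie);

	ceph_osdc_cancel_event(event);	/* drops the caller's reference */
	return 0;
}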
1894
1895void ceph_osdc_cancel_event(struct ceph_osd_event *event)
1896{
1897 struct ceph_osd_client *osdc = event->osdc;
1898
1899 dout("cancel_event %p\n", event);
1900 spin_lock(&osdc->event_lock);
1901 __remove_event(event);
1902 spin_unlock(&osdc->event_lock);
1903 ceph_osdc_put_event(event); /* caller's */
1904}
1905EXPORT_SYMBOL(ceph_osdc_cancel_event);
1906
1907
1908static void do_event_work(struct work_struct *work)
1909{
1910 struct ceph_osd_event_work *event_work =
1911 container_of(work, struct ceph_osd_event_work, work);
1912 struct ceph_osd_event *event = event_work->event;
1913 u64 ver = event_work->ver;
1914 u64 notify_id = event_work->notify_id;
1915 u8 opcode = event_work->opcode;
1916
1917 dout("do_event_work completing %p\n", event);
1918 event->cb(ver, notify_id, opcode, event->data);
a40c4f10
YS
1919 dout("do_event_work completed %p\n", event);
1920 ceph_osdc_put_event(event);
1921 kfree(event_work);
1922}
1923
1924
1925/*
1926 * Process osd watch notifications
1927 */
3c663bbd
AE
1928static void handle_watch_notify(struct ceph_osd_client *osdc,
1929 struct ceph_msg *msg)
a40c4f10
YS
1930{
1931 void *p, *end;
1932 u8 proto_ver;
1933 u64 cookie, ver, notify_id;
1934 u8 opcode;
1935 struct ceph_osd_event *event;
1936 struct ceph_osd_event_work *event_work;
1937
1938 p = msg->front.iov_base;
1939 end = p + msg->front.iov_len;
1940
1941 ceph_decode_8_safe(&p, end, proto_ver, bad);
1942 ceph_decode_8_safe(&p, end, opcode, bad);
1943 ceph_decode_64_safe(&p, end, cookie, bad);
1944 ceph_decode_64_safe(&p, end, ver, bad);
1945 ceph_decode_64_safe(&p, end, notify_id, bad);
1946
1947 spin_lock(&osdc->event_lock);
1948 event = __find_event(osdc, cookie);
1949 if (event) {
3c663bbd 1950 BUG_ON(event->one_shot);
a40c4f10 1951 get_event(event);
a40c4f10
YS
1952 }
1953 spin_unlock(&osdc->event_lock);
1954 dout("handle_watch_notify cookie %lld ver %lld event %p\n",
1955 cookie, ver, event);
1956 if (event) {
1957 event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
a40c4f10
YS
1958 if (!event_work) {
1959 dout("ERROR: could not allocate event_work\n");
1960 goto done_err;
1961 }
6b0ae409 1962 INIT_WORK(&event_work->work, do_event_work);
a40c4f10
YS
1963 event_work->event = event;
1964 event_work->ver = ver;
1965 event_work->notify_id = notify_id;
1966 event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			kfree(event_work);	/* don't leak it on failure */
			goto done_err;
		}
1971 }
1972
1973 return;
1974
1975done_err:
a40c4f10
YS
1976 ceph_osdc_put_event(event);
1977 return;
1978
1979bad:
1980 pr_err("osdc handle_watch_notify corrupt msg\n");
1981 return;
1982}
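/*
 * Illustrative sketch, not part of the original file: the front-section
 * layout handle_watch_notify() decodes above.  The struct name is
 * hypothetical; on the wire the integers are little-endian, which is
 * why the function uses the ceph_decode_*_safe() helpers rather than a
 * struct overlay.
 */
struct example_watch_notify_front {
	u8 proto_ver;
	u8 opcode;
	__le64 cookie;
	__le64 ver;
	__le64 notify_id;
} __packed;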
1983
e65550fd
AE
/*
 * Build the new request and encode the accompanying request message.
 */
1988void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
1989 struct ceph_snap_context *snapc, u64 snap_id,
1990 struct timespec *mtime)
1991{
1992 struct ceph_msg *msg = req->r_request;
1993 void *p;
1994 size_t msg_size;
1995 int flags = req->r_flags;
1996 u64 data_len;
1997 unsigned int i;
1998
1999 req->r_snapid = snap_id;
2000 req->r_snapc = ceph_get_snap_context(snapc);
2001
2002 /* encode request */
2003 msg->hdr.version = cpu_to_le16(4);
2004
2005 p = msg->front.iov_base;
2006 ceph_encode_32(&p, 1); /* client_inc is always 1 */
2007 req->r_request_osdmap_epoch = p;
2008 p += 4;
2009 req->r_request_flags = p;
2010 p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	else
		memset(p, 0, sizeof(struct ceph_timespec)); /* don't send stale front data */
	p += sizeof(struct ceph_timespec);
2014 req->r_request_reassert_version = p;
2015 p += sizeof(struct ceph_eversion); /* will get filled in */
2016
2017 /* oloc */
2018 ceph_encode_8(&p, 4);
2019 ceph_encode_8(&p, 4);
2020 ceph_encode_32(&p, 8 + 4 + 4);
2021 req->r_request_pool = p;
2022 p += 8;
2023 ceph_encode_32(&p, -1); /* preferred */
2024 ceph_encode_32(&p, 0); /* key len */
2025
2026 ceph_encode_8(&p, 1);
2027 req->r_request_pgid = p;
2028 p += 8 + 4;
2029 ceph_encode_32(&p, -1); /* preferred */
2030
2031 /* oid */
2032 ceph_encode_32(&p, req->r_oid_len);
2033 memcpy(p, req->r_oid, req->r_oid_len);
2034 dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
2035 p += req->r_oid_len;
2036
2037 /* ops--can imply data */
2038 ceph_encode_16(&p, (u16)req->r_num_ops);
2039 data_len = 0;
2040 for (i = 0; i < req->r_num_ops; i++) {
2041 data_len += osd_req_encode_op(req, p, i);
2042 p += sizeof(struct ceph_osd_op);
2043 }
2044
2045 /* snaps */
2046 ceph_encode_64(&p, req->r_snapid);
2047 ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
2048 ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
2049 if (req->r_snapc) {
		for (i = 0; i < req->r_snapc->num_snaps; i++) {
2051 ceph_encode_64(&p, req->r_snapc->snaps[i]);
2052 }
2053 }
2054
2055 req->r_request_attempts = p;
2056 p += 4;
2057
2058 /* data */
2059 if (flags & CEPH_OSD_FLAG_WRITE) {
2060 u16 data_off;
2061
2062 /*
2063 * The header "data_off" is a hint to the receiver
2064 * allowing it to align received data into its
2065 * buffers such that there's no need to re-copy
2066 * it before writing it to disk (direct I/O).
2067 */
2068 data_off = (u16) (off & 0xffff);
2069 req->r_request->hdr.data_off = cpu_to_le16(data_off);
2070 }
2071 req->r_request->hdr.data_len = cpu_to_le32(data_len);
2072
2073 BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
2074 msg_size = p - msg->front.iov_base;
2075 msg->front.iov_len = msg_size;
2076 msg->hdr.front_len = cpu_to_le32(msg_size);
2077
2078 dout("build_request msg_size was %d\n", (int)msg_size);
2079}
2080EXPORT_SYMBOL(ceph_osdc_build_request);
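/*
 * Illustrative sketch, not part of the original file: an upper bound on
 * the front-section bytes ceph_osdc_build_request() emits, following
 * the encoding order above.  Purely arithmetic; no kernel state needed.
 */
static size_t example_request_front_size(unsigned int num_ops,
					 unsigned int num_snaps,
					 size_t oid_len)
{
	size_t size = 4;			/* client_inc */

	size += 4 + 4;				/* osdmap epoch + flags */
	size += sizeof(struct ceph_timespec);	/* mtime */
	size += sizeof(struct ceph_eversion);	/* reassert_version */
	size += 1 + 1 + 4 + 8 + 4 + 4;		/* oloc */
	size += 1 + 8 + 4 + 4;			/* pgid */
	size += 4 + oid_len;			/* oid */
	size += 2 + num_ops * sizeof(struct ceph_osd_op);
	size += 8 + 8 + 4 + num_snaps * 8;	/* snapid, seq, snaps */
	size += 4;				/* retry attempt count */
	return size;
}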
2081
70636773
AE
2082/*
2083 * Register request, send initial attempt.
2084 */
2085int ceph_osdc_start_request(struct ceph_osd_client *osdc,
2086 struct ceph_osd_request *req,
2087 bool nofail)
2088{
2089 int rc = 0;
2090
f24e9980
SW
2091 down_read(&osdc->map_sem);
2092 mutex_lock(&osdc->request_mutex);
dc4b870c 2093 __register_request(osdc, req);
92451b49
AE
2094 WARN_ON(req->r_sent);
2095 rc = __map_request(osdc, req, 0);
2096 if (rc < 0) {
2097 if (nofail) {
2098 dout("osdc_start_request failed map, "
2099 " will retry %lld\n", req->r_tid);
2100 rc = 0;
f24e9980 2101 }
92451b49 2102 goto out_unlock;
f24e9980 2103 }
92451b49
AE
2104 if (req->r_osd == NULL) {
2105 dout("send_request %p no up osds in pg\n", req);
2106 ceph_monc_request_next_osdmap(&osdc->client->monc);
2107 } else {
7e2766a1 2108 __send_queued(osdc);
92451b49
AE
2109 }
2110 rc = 0;
234af26f 2111out_unlock:
f24e9980
SW
2112 mutex_unlock(&osdc->request_mutex);
2113 up_read(&osdc->map_sem);
2114 return rc;
2115}
3d14c5d2 2116EXPORT_SYMBOL(ceph_osdc_start_request);
f24e9980
SW
2117
2118/*
2119 * wait for a request to complete
2120 */
2121int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
2122 struct ceph_osd_request *req)
2123{
2124 int rc;
2125
2126 rc = wait_for_completion_interruptible(&req->r_completion);
2127 if (rc < 0) {
2128 mutex_lock(&osdc->request_mutex);
2129 __cancel_request(req);
529cfcc4 2130 __unregister_request(osdc, req);
f24e9980 2131 mutex_unlock(&osdc->request_mutex);
25845472 2132 complete_request(req);
529cfcc4 2133 dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
f24e9980
SW
2134 return rc;
2135 }
2136
2137 dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
2138 return req->r_result;
2139}
3d14c5d2 2140EXPORT_SYMBOL(ceph_osdc_wait_request);
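/*
 * Illustrative sketch, not part of the original file: the
 * submit-and-wait pattern the two helpers above exist for, as used by
 * ceph_osdc_readpages() and ceph_osdc_writepages() below.
 */
static int example_submit_and_wait(struct ceph_osd_client *osdc,
				   struct ceph_osd_request *req)
{
	int ret;

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req); /* interruptible */

	ceph_osdc_put_request(req);	/* drop the submitter's reference */
	return ret;
}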
f24e9980
SW
2141
2142/*
2143 * sync - wait for all in-flight requests to flush. avoid starvation.
2144 */
2145void ceph_osdc_sync(struct ceph_osd_client *osdc)
2146{
2147 struct ceph_osd_request *req;
2148 u64 last_tid, next_tid = 0;
2149
2150 mutex_lock(&osdc->request_mutex);
2151 last_tid = osdc->last_tid;
2152 while (1) {
2153 req = __lookup_request_ge(osdc, next_tid);
2154 if (!req)
2155 break;
2156 if (req->r_tid > last_tid)
2157 break;
2158
2159 next_tid = req->r_tid + 1;
2160 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
2161 continue;
2162
2163 ceph_osdc_get_request(req);
2164 mutex_unlock(&osdc->request_mutex);
2165 dout("sync waiting on tid %llu (last is %llu)\n",
2166 req->r_tid, last_tid);
2167 wait_for_completion(&req->r_safe_completion);
2168 mutex_lock(&osdc->request_mutex);
2169 ceph_osdc_put_request(req);
2170 }
2171 mutex_unlock(&osdc->request_mutex);
2172 dout("sync done (thru tid %llu)\n", last_tid);
2173}
3d14c5d2 2174EXPORT_SYMBOL(ceph_osdc_sync);
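/*
 * Illustrative sketch, not part of the original file: the shutdown
 * ordering a caller is expected to follow, assuming no new requests
 * are being submitted concurrently.
 */
static void example_flush_and_stop(struct ceph_osd_client *osdc)
{
	ceph_osdc_sync(osdc);	/* wait for in-flight writes to commit */
	ceph_osdc_stop(osdc);	/* defined below; tears down the client */
}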
f24e9980
SW
2175
2176/*
2177 * init, shutdown
2178 */
2179int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
2180{
2181 int err;
2182
2183 dout("init\n");
2184 osdc->client = client;
2185 osdc->osdmap = NULL;
2186 init_rwsem(&osdc->map_sem);
2187 init_completion(&osdc->map_waiters);
2188 osdc->last_requested_map = 0;
2189 mutex_init(&osdc->request_mutex);
f24e9980
SW
2190 osdc->last_tid = 0;
2191 osdc->osds = RB_ROOT;
f5a2041b 2192 INIT_LIST_HEAD(&osdc->osd_lru);
f24e9980 2193 osdc->requests = RB_ROOT;
422d2cb8 2194 INIT_LIST_HEAD(&osdc->req_lru);
6f6c7006
SW
2195 INIT_LIST_HEAD(&osdc->req_unsent);
2196 INIT_LIST_HEAD(&osdc->req_notarget);
a40c4f10 2197 INIT_LIST_HEAD(&osdc->req_linger);
f24e9980
SW
2198 osdc->num_requests = 0;
2199 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
f5a2041b 2200 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
a40c4f10
YS
2201 spin_lock_init(&osdc->event_lock);
2202 osdc->event_tree = RB_ROOT;
2203 osdc->event_count = 0;
f5a2041b
YS
2204
2205 schedule_delayed_work(&osdc->osds_timeout_work,
3d14c5d2 2206 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
f24e9980 2207
5f44f142 2208 err = -ENOMEM;
f24e9980
SW
2209 osdc->req_mempool = mempool_create_kmalloc_pool(10,
2210 sizeof(struct ceph_osd_request));
2211 if (!osdc->req_mempool)
5f44f142 2212 goto out;
f24e9980 2213
d50b409f
SW
2214 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
2215 OSD_OP_FRONT_LEN, 10, true,
4f48280e 2216 "osd_op");
f24e9980 2217 if (err < 0)
5f44f142 2218 goto out_mempool;
d50b409f 2219 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4f48280e
SW
2220 OSD_OPREPLY_FRONT_LEN, 10, true,
2221 "osd_op_reply");
c16e7869
SW
2222 if (err < 0)
2223 goto out_msgpool;
a40c4f10
YS
2224
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq) {
		/* create_singlethread_workqueue() returns NULL on failure */
		err = -ENOMEM;
		goto out_msgpool_reply;
	}
f24e9980 2231 return 0;
5f44f142 2232
c16e7869
SW
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
5f44f142
SW
2235out_mempool:
2236 mempool_destroy(osdc->req_mempool);
2237out:
2238 return err;
f24e9980
SW
2239}
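/*
 * Illustrative sketch, not part of the original file.  In-tree the
 * osd_client is embedded in struct ceph_client rather than allocated
 * separately; this standalone wrapper only shows the init/error
 * handling expected around ceph_osdc_init().
 */
static struct ceph_osd_client *example_osdc_setup(struct ceph_client *client)
{
	struct ceph_osd_client *osdc;
	int err;

	osdc = kzalloc(sizeof(*osdc), GFP_KERNEL);
	if (!osdc)
		return ERR_PTR(-ENOMEM);

	err = ceph_osdc_init(osdc, client);
	if (err) {
		kfree(osdc);
		return ERR_PTR(err);
	}
	return osdc;
}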
2240
2241void ceph_osdc_stop(struct ceph_osd_client *osdc)
2242{
a40c4f10
YS
2243 flush_workqueue(osdc->notify_wq);
2244 destroy_workqueue(osdc->notify_wq);
f24e9980 2245 cancel_delayed_work_sync(&osdc->timeout_work);
f5a2041b 2246 cancel_delayed_work_sync(&osdc->osds_timeout_work);
f24e9980
SW
2247 if (osdc->osdmap) {
2248 ceph_osdmap_destroy(osdc->osdmap);
2249 osdc->osdmap = NULL;
2250 }
aca420bc 2251 remove_all_osds(osdc);
f24e9980
SW
2252 mempool_destroy(osdc->req_mempool);
2253 ceph_msgpool_destroy(&osdc->msgpool_op);
c16e7869 2254 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
f24e9980
SW
2255}
2256
2257/*
2258 * Read some contiguous pages. If we cross a stripe boundary, shorten
2259 * *plen. Return number of bytes read, or error.
2260 */
2261int ceph_osdc_readpages(struct ceph_osd_client *osdc,
2262 struct ceph_vino vino, struct ceph_file_layout *layout,
2263 u64 off, u64 *plen,
2264 u32 truncate_seq, u64 truncate_size,
b7495fc2 2265 struct page **pages, int num_pages, int page_align)
f24e9980
SW
2266{
2267 struct ceph_osd_request *req;
2268 int rc = 0;
2269
2270 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
2271 vino.snap, off, *plen);
79528734 2272 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
f24e9980 2273 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
acead002 2274 NULL, truncate_seq, truncate_size,
153e5167 2275 false);
6816282d
SW
2276 if (IS_ERR(req))
2277 return PTR_ERR(req);
f24e9980
SW
2278
2279 /* it may be a short read due to an object boundary */
0fff87ec 2280
a4ce40a9
AE
2281 osd_req_op_extent_osd_data_pages(req, 0, false,
2282 pages, *plen, page_align, false, false);
f24e9980 2283
e0c59487 2284 dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
43bfe5de 2285 off, *plen, *plen, page_align);
f24e9980 2286
79528734 2287 ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
02ee07d3 2288
f24e9980
SW
2289 rc = ceph_osdc_start_request(osdc, req, false);
2290 if (!rc)
2291 rc = ceph_osdc_wait_request(osdc, req);
2292
2293 ceph_osdc_put_request(req);
2294 dout("readpages result %d\n", rc);
2295 return rc;
2296}
3d14c5d2 2297EXPORT_SYMBOL(ceph_osdc_readpages);
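/*
 * Illustrative sketch, not part of the original file: reading a single
 * page with the helper above.  vino and layout are assumed to come
 * from the caller's inode; truncate handling is elided.
 */
static int example_read_one_page(struct ceph_osd_client *osdc,
				 struct ceph_vino vino,
				 struct ceph_file_layout *layout,
				 struct page *page, u64 off)
{
	u64 len = PAGE_SIZE;
	int page_align = off & ~PAGE_MASK;

	return ceph_osdc_readpages(osdc, vino, layout, off, &len,
				   0, 0,	/* no truncate_seq/size */
				   &page, 1, page_align);
}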
f24e9980
SW
2298
2299/*
2300 * do a synchronous write on N pages
2301 */
2302int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
2303 struct ceph_file_layout *layout,
2304 struct ceph_snap_context *snapc,
2305 u64 off, u64 len,
2306 u32 truncate_seq, u64 truncate_size,
2307 struct timespec *mtime,
24808826 2308 struct page **pages, int num_pages)
f24e9980
SW
2309{
2310 struct ceph_osd_request *req;
2311 int rc = 0;
b7495fc2 2312 int page_align = off & ~PAGE_MASK;
f24e9980 2313
acead002 2314 BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */
79528734 2315 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
f24e9980 2316 CEPH_OSD_OP_WRITE,
24808826 2317 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
acead002 2318 snapc, truncate_seq, truncate_size,
153e5167 2319 true);
6816282d
SW
2320 if (IS_ERR(req))
2321 return PTR_ERR(req);
f24e9980
SW
2322
2323 /* it may be a short write due to an object boundary */
a4ce40a9 2324 osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align,
43bfe5de
AE
2325 false, false);
2326 dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
f24e9980 2327
79528734 2328 ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);
02ee07d3 2329
87f979d3 2330 rc = ceph_osdc_start_request(osdc, req, true);
f24e9980
SW
2331 if (!rc)
2332 rc = ceph_osdc_wait_request(osdc, req);
2333
2334 ceph_osdc_put_request(req);
2335 if (rc == 0)
2336 rc = len;
2337 dout("writepages result %d\n", rc);
2338 return rc;
2339}
3d14c5d2 2340EXPORT_SYMBOL(ceph_osdc_writepages);
f24e9980
SW
2341
2342/*
2343 * handle incoming message
2344 */
2345static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2346{
2347 struct ceph_osd *osd = con->private;
32c895e7 2348 struct ceph_osd_client *osdc;
f24e9980
SW
2349 int type = le16_to_cpu(msg->hdr.type);
2350
2351 if (!osd)
4a32f93d 2352 goto out;
32c895e7 2353 osdc = osd->o_osdc;
f24e9980
SW
2354
2355 switch (type) {
2356 case CEPH_MSG_OSD_MAP:
2357 ceph_osdc_handle_map(osdc, msg);
2358 break;
2359 case CEPH_MSG_OSD_OPREPLY:
350b1c32 2360 handle_reply(osdc, msg, con);
f24e9980 2361 break;
a40c4f10
YS
2362 case CEPH_MSG_WATCH_NOTIFY:
2363 handle_watch_notify(osdc, msg);
2364 break;
f24e9980
SW
2365
2366 default:
2367 pr_err("received unknown message type %d %s\n", type,
2368 ceph_msg_type_name(type));
2369 }
4a32f93d 2370out:
f24e9980
SW
2371 ceph_msg_put(msg);
2372}
2373
5b3a4db3 2374/*
21b667f6
SW
2375 * lookup and return message for incoming reply. set up reply message
2376 * pages.
5b3a4db3
SW
2377 */
2378static struct ceph_msg *get_reply(struct ceph_connection *con,
2450418c
YS
2379 struct ceph_msg_header *hdr,
2380 int *skip)
f24e9980
SW
2381{
2382 struct ceph_osd *osd = con->private;
2383 struct ceph_osd_client *osdc = osd->o_osdc;
2450418c 2384 struct ceph_msg *m;
0547a9b3 2385 struct ceph_osd_request *req;
5b3a4db3
SW
2386 int front = le32_to_cpu(hdr->front_len);
2387 int data_len = le32_to_cpu(hdr->data_len);
0547a9b3 2388 u64 tid;
f24e9980 2389
0547a9b3
YS
2390 tid = le64_to_cpu(hdr->tid);
2391 mutex_lock(&osdc->request_mutex);
2392 req = __lookup_request(osdc, tid);
2393 if (!req) {
2394 *skip = 1;
2395 m = NULL;
756a16a5
SW
2396 dout("get_reply unknown tid %llu from osd%d\n", tid,
2397 osd->o_osd);
0547a9b3
YS
2398 goto out;
2399 }
c16e7869 2400
ace6d3a9 2401 if (req->r_reply->con)
8921d114 2402 dout("%s revoking msg %p from old con %p\n", __func__,
ace6d3a9
AE
2403 req->r_reply, req->r_reply->con);
2404 ceph_msg_revoke_incoming(req->r_reply);
0547a9b3 2405
c16e7869
SW
2406 if (front > req->r_reply->front.iov_len) {
2407 pr_warning("get_reply front %d > preallocated %d\n",
2408 front, (int)req->r_reply->front.iov_len);
b61c2763 2409 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
a79832f2 2410 if (!m)
c16e7869
SW
2411 goto out;
2412 ceph_msg_put(req->r_reply);
2413 req->r_reply = m;
2414 }
2415 m = ceph_msg_get(req->r_reply);
2416
0547a9b3 2417 if (data_len > 0) {
a4ce40a9 2418 struct ceph_osd_data *osd_data;
0fff87ec 2419
a4ce40a9
AE
2420 /*
2421 * XXX This is assuming there is only one op containing
2422 * XXX page data. Probably OK for reads, but this
2423 * XXX ought to be done more generally.
2424 */
2425 osd_data = osd_req_op_extent_osd_data(req, 0, false);
0fff87ec 2426 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
0fff87ec 2427 if (osd_data->pages &&
e0c59487 2428 unlikely(osd_data->length < data_len)) {
2ac2b7a6 2429
e0c59487
AE
2430 pr_warning("tid %lld reply has %d bytes "
2431 "we had only %llu bytes ready\n",
2432 tid, data_len, osd_data->length);
2ac2b7a6
AE
2433 *skip = 1;
2434 ceph_msg_put(m);
2435 m = NULL;
2436 goto out;
2437 }
2ac2b7a6 2438 }
0547a9b3 2439 }
5b3a4db3 2440 *skip = 0;
c16e7869 2441 dout("get_reply tid %lld %p\n", tid, m);
0547a9b3
YS
2442
2443out:
2444 mutex_unlock(&osdc->request_mutex);
2450418c 2445 return m;
5b3a4db3
SW
2446
2447}
2448
2449static struct ceph_msg *alloc_msg(struct ceph_connection *con,
2450 struct ceph_msg_header *hdr,
2451 int *skip)
2452{
2453 struct ceph_osd *osd = con->private;
2454 int type = le16_to_cpu(hdr->type);
2455 int front = le32_to_cpu(hdr->front_len);
2456
1c20f2d2 2457 *skip = 0;
5b3a4db3
SW
2458 switch (type) {
2459 case CEPH_MSG_OSD_MAP:
a40c4f10 2460 case CEPH_MSG_WATCH_NOTIFY:
b61c2763 2461 return ceph_msg_new(type, front, GFP_NOFS, false);
5b3a4db3
SW
2462 case CEPH_MSG_OSD_OPREPLY:
2463 return get_reply(con, hdr, skip);
2464 default:
2465 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
2466 osd->o_osd);
2467 *skip = 1;
2468 return NULL;
2469 }
f24e9980
SW
2470}
2471
2472/*
2473 * Wrappers to refcount containing ceph_osd struct
2474 */
2475static struct ceph_connection *get_osd_con(struct ceph_connection *con)
2476{
2477 struct ceph_osd *osd = con->private;
2478 if (get_osd(osd))
2479 return con;
2480 return NULL;
2481}
2482
2483static void put_osd_con(struct ceph_connection *con)
2484{
2485 struct ceph_osd *osd = con->private;
2486 put_osd(osd);
2487}
2488
4e7a5dcd
SW
2489/*
2490 * authentication
2491 */
a3530df3
AE
2492/*
2493 * Note: returned pointer is the address of a structure that's
2494 * managed separately. Caller must *not* attempt to free it.
2495 */
2496static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
8f43fb53 2497 int *proto, int force_new)
4e7a5dcd
SW
2498{
2499 struct ceph_osd *o = con->private;
2500 struct ceph_osd_client *osdc = o->o_osdc;
2501 struct ceph_auth_client *ac = osdc->client->monc.auth;
74f1869f 2502 struct ceph_auth_handshake *auth = &o->o_auth;
4e7a5dcd 2503
74f1869f 2504 if (force_new && auth->authorizer) {
27859f97 2505 ceph_auth_destroy_authorizer(ac, auth->authorizer);
74f1869f
AE
2506 auth->authorizer = NULL;
2507 }
27859f97
SW
2508 if (!auth->authorizer) {
2509 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2510 auth);
4e7a5dcd 2511 if (ret)
a3530df3 2512 return ERR_PTR(ret);
27859f97
SW
2513 } else {
2514 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
0bed9b5c
SW
2515 auth);
2516 if (ret)
2517 return ERR_PTR(ret);
4e7a5dcd 2518 }
4e7a5dcd 2519 *proto = ac->protocol;
74f1869f 2520
a3530df3 2521 return auth;
4e7a5dcd
SW
2522}
2523
2524
2525static int verify_authorizer_reply(struct ceph_connection *con, int len)
2526{
2527 struct ceph_osd *o = con->private;
2528 struct ceph_osd_client *osdc = o->o_osdc;
2529 struct ceph_auth_client *ac = osdc->client->monc.auth;
2530
27859f97 2531 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
4e7a5dcd
SW
2532}
2533
9bd2e6f8
SW
2534static int invalidate_authorizer(struct ceph_connection *con)
2535{
2536 struct ceph_osd *o = con->private;
2537 struct ceph_osd_client *osdc = o->o_osdc;
2538 struct ceph_auth_client *ac = osdc->client->monc.auth;
2539
27859f97 2540 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
9bd2e6f8
SW
2541 return ceph_monc_validate_auth(&osdc->client->monc);
2542}
4e7a5dcd 2543
9e32789f 2544static const struct ceph_connection_operations osd_con_ops = {
f24e9980
SW
2545 .get = get_osd_con,
2546 .put = put_osd_con,
2547 .dispatch = dispatch,
4e7a5dcd
SW
2548 .get_authorizer = get_authorizer,
2549 .verify_authorizer_reply = verify_authorizer_reply,
9bd2e6f8 2550 .invalidate_authorizer = invalidate_authorizer,
f24e9980 2551 .alloc_msg = alloc_msg,
81b024e7 2552 .fault = osd_reset,
f24e9980 2553};