[linux-2.6-block.git] / net/ceph/osd_client.c
a4ce40a9 1
3d14c5d2 2#include <linux/ceph/ceph_debug.h>
f24e9980 3
3d14c5d2 4#include <linux/module.h>
f24e9980
SW
5#include <linux/err.h>
6#include <linux/highmem.h>
7#include <linux/mm.h>
8#include <linux/pagemap.h>
9#include <linux/slab.h>
10#include <linux/uaccess.h>
68b4476b
YS
11#ifdef CONFIG_BLOCK
12#include <linux/bio.h>
13#endif
f24e9980 14
3d14c5d2
YS
15#include <linux/ceph/libceph.h>
16#include <linux/ceph/osd_client.h>
17#include <linux/ceph/messenger.h>
18#include <linux/ceph/decode.h>
19#include <linux/ceph/auth.h>
20#include <linux/ceph/pagelist.h>
f24e9980 21
c16e7869
SW
22#define OSD_OP_FRONT_LEN 4096
23#define OSD_OPREPLY_FRONT_LEN 512
0d59ab81 24
9e32789f 25static const struct ceph_connection_operations osd_con_ops;
f24e9980 26
f9d25199 27static void __send_queued(struct ceph_osd_client *osdc);
6f6c7006 28static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
a40c4f10
YS
29static void __register_request(struct ceph_osd_client *osdc,
30 struct ceph_osd_request *req);
31static void __unregister_linger_request(struct ceph_osd_client *osdc,
32 struct ceph_osd_request *req);
56e925b6
SW
33static void __send_request(struct ceph_osd_client *osdc,
34 struct ceph_osd_request *req);
f24e9980
SW
35
36/*
37 * Implement client access to distributed object storage cluster.
38 *
39 * All data objects are stored within a cluster/cloud of OSDs, or
40 * "object storage devices." (Note that Ceph OSDs have _nothing_ to
41 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
42 * remote daemons serving up and coordinating consistent and safe
43 * access to storage.
44 *
45 * Cluster membership and the mapping of data objects onto storage devices
46 * are described by the osd map.
47 *
48 * We keep track of pending OSD requests (read, write), resubmit
49 * requests to different OSDs when the cluster topology/data layout
50 * change, or retry the affected requests when the communications
51 * channel with an OSD is reset.
52 */
53
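/*
 * Typical request lifecycle (illustrative sketch; the start/wait
 * helpers are defined later in this file):
 *
 *	req = ceph_osdc_new_request(...);
 *	osd_req_op_extent_osd_data_pages(...);	- attach data buffers
 *	ceph_osdc_start_request(osdc, req, false);
 *	ceph_osdc_wait_request(osdc, req);	- or rely on r_callback
 *	ceph_osdc_put_request(req);
 */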
54/*
55 * calculate the mapping of a file extent onto an object, and fill out the
56 * request accordingly. shorten extent as necessary if it crosses an
57 * object boundary.
58 *
59 * fill osd op in request message.
60 */
dbe0fc41 61static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
a19dadfb 62 u64 *objnum, u64 *objoff, u64 *objlen)
f24e9980 63{
60e56f13 64 u64 orig_len = *plen;
d63b77f4 65 int r;
f24e9980 66
60e56f13 67 /* object extent? */
75d1c941
AE
68 r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
69 objoff, objlen);
d63b77f4
SW
70 if (r < 0)
71 return r;
75d1c941
AE
72 if (*objlen < orig_len) {
73 *plen = *objlen;
60e56f13
AE
74 dout(" skipping last %llu, final file extent %llu~%llu\n",
75 orig_len - *plen, off, *plen);
76 }
77
75d1c941 78 dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
f24e9980 79
3ff5f385 80 return 0;
f24e9980
SW
81}
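/*
 * Worked example (assuming a simple 4 MB object size and no striping):
 * off=6 MB, *plen=4 MB maps to objnum=1, objoff=2 MB, objlen=2 MB.
 * Since objlen < the original length, *plen is trimmed to 2 MB and the
 * caller must issue a further request for the remainder.
 */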
82
c54d47bf
AE
83static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
84{
85 memset(osd_data, 0, sizeof (*osd_data));
86 osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
87}
88
a4ce40a9 89static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
43bfe5de
AE
90 struct page **pages, u64 length, u32 alignment,
91 bool pages_from_pool, bool own_pages)
92{
93 osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
94 osd_data->pages = pages;
95 osd_data->length = length;
96 osd_data->alignment = alignment;
97 osd_data->pages_from_pool = pages_from_pool;
98 osd_data->own_pages = own_pages;
99}
43bfe5de 100
a4ce40a9 101static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
43bfe5de
AE
102 struct ceph_pagelist *pagelist)
103{
104 osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
105 osd_data->pagelist = pagelist;
106}
43bfe5de
AE
107
108#ifdef CONFIG_BLOCK
a4ce40a9 109static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
43bfe5de
AE
110 struct bio *bio, size_t bio_length)
111{
112 osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
113 osd_data->bio = bio;
114 osd_data->bio_length = bio_length;
115}
43bfe5de
AE
116#endif /* CONFIG_BLOCK */
117
a4ce40a9
AE
118struct ceph_osd_data *
119osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
120 unsigned int which, bool write_request)
121{
122 BUG_ON(which >= osd_req->r_num_ops);
123
5476492f 124 return &osd_req->r_ops[which].extent.osd_data;
a4ce40a9
AE
125}
126EXPORT_SYMBOL(osd_req_op_extent_osd_data);
127
128struct ceph_osd_data *
129osd_req_op_cls_request_info(struct ceph_osd_request *osd_req,
130 unsigned int which)
131{
132 BUG_ON(which >= osd_req->r_num_ops);
133
5476492f 134 return &osd_req->r_ops[which].cls.request_info;
a4ce40a9
AE
135}
136EXPORT_SYMBOL(osd_req_op_cls_request_info); /* ??? */
137
04017e29
AE
138struct ceph_osd_data *
139osd_req_op_cls_request_data(struct ceph_osd_request *osd_req,
140 unsigned int which)
141{
142 BUG_ON(which >= osd_req->r_num_ops);
143
144 return &osd_req->r_ops[which].cls.request_data;
145}
146EXPORT_SYMBOL(osd_req_op_cls_request_data); /* ??? */
147
a4ce40a9
AE
148struct ceph_osd_data *
149osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
150 unsigned int which)
151{
152 BUG_ON(which >= osd_req->r_num_ops);
153
5476492f 154 return &osd_req->r_ops[which].cls.response_data;
a4ce40a9
AE
155}
156EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? */
157
158void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
159 unsigned int which, bool write_request,
160 struct page **pages, u64 length, u32 alignment,
161 bool pages_from_pool, bool own_pages)
162{
163 struct ceph_osd_data *osd_data;
164
165 osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
166 ceph_osd_data_pages_init(osd_data, pages, length, alignment,
167 pages_from_pool, own_pages);
a4ce40a9
AE
168}
169EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
170
171void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
172 unsigned int which, bool write_request,
173 struct ceph_pagelist *pagelist)
174{
175 struct ceph_osd_data *osd_data;
176
177 osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
178 ceph_osd_data_pagelist_init(osd_data, pagelist);
a4ce40a9
AE
179}
180EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
181
182#ifdef CONFIG_BLOCK
183void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
184 unsigned int which, bool write_request,
185 struct bio *bio, size_t bio_length)
186{
187 struct ceph_osd_data *osd_data;
188
189 osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
190 ceph_osd_data_bio_init(osd_data, bio, bio_length);
a4ce40a9
AE
191}
192EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
193#endif /* CONFIG_BLOCK */
194
195static void osd_req_op_cls_request_info_pagelist(
196 struct ceph_osd_request *osd_req,
197 unsigned int which, struct ceph_pagelist *pagelist)
198{
199 struct ceph_osd_data *osd_data;
200
201 osd_data = osd_req_op_cls_request_info(osd_req, which);
202 ceph_osd_data_pagelist_init(osd_data, pagelist);
a4ce40a9
AE
203}
204
04017e29
AE
205void osd_req_op_cls_request_data_pagelist(
206 struct ceph_osd_request *osd_req,
207 unsigned int which, struct ceph_pagelist *pagelist)
208{
209 struct ceph_osd_data *osd_data;
210
211 osd_data = osd_req_op_cls_request_data(osd_req, which);
212 ceph_osd_data_pagelist_init(osd_data, pagelist);
213}
214EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
215
a4ce40a9
AE
216void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
217 unsigned int which, struct page **pages, u64 length,
218 u32 alignment, bool pages_from_pool, bool own_pages)
219{
220 struct ceph_osd_data *osd_data;
221
222 osd_data = osd_req_op_cls_response_data(osd_req, which);
223 ceph_osd_data_pages_init(osd_data, pages, length, alignment,
224 pages_from_pool, own_pages);
a4ce40a9
AE
225}
226EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
227
23c08a9c
AE
228static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
229{
230 switch (osd_data->type) {
231 case CEPH_OSD_DATA_TYPE_NONE:
232 return 0;
233 case CEPH_OSD_DATA_TYPE_PAGES:
234 return osd_data->length;
235 case CEPH_OSD_DATA_TYPE_PAGELIST:
236 return (u64)osd_data->pagelist->length;
237#ifdef CONFIG_BLOCK
238 case CEPH_OSD_DATA_TYPE_BIO:
239 return (u64)osd_data->bio_length;
240#endif /* CONFIG_BLOCK */
241 default:
242 WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
243 return 0;
244 }
245}
246
5476492f 247
c54d47bf
AE
248static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
249{
5476492f 250 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
c54d47bf
AE
251 int num_pages;
252
253 num_pages = calc_pages_for((u64)osd_data->alignment,
254 (u64)osd_data->length);
255 ceph_release_page_vector(osd_data->pages, num_pages);
256 }
5476492f
AE
257 ceph_osd_data_init(osd_data);
258}
259
260static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
261 unsigned int which)
262{
263 struct ceph_osd_req_op *op;
264
265 BUG_ON(which >= osd_req->r_num_ops);
266 op = &osd_req->r_ops[which];
267
268 switch (op->op) {
269 case CEPH_OSD_OP_READ:
270 case CEPH_OSD_OP_WRITE:
271 ceph_osd_data_release(&op->extent.osd_data);
272 break;
273 case CEPH_OSD_OP_CALL:
274 ceph_osd_data_release(&op->cls.request_info);
04017e29 275 ceph_osd_data_release(&op->cls.request_data);
5476492f
AE
276 ceph_osd_data_release(&op->cls.response_data);
277 break;
278 default:
279 break;
280 }
c54d47bf
AE
281}
282
f24e9980
SW
283/*
284 * requests
285 */
415e49a9 286void ceph_osdc_release_request(struct kref *kref)
f24e9980 287{
c54d47bf 288 struct ceph_osd_request *req;
5476492f 289 unsigned int which;
415e49a9 290
c54d47bf 291 req = container_of(kref, struct ceph_osd_request, r_kref);
415e49a9
SW
292 if (req->r_request)
293 ceph_msg_put(req->r_request);
ace6d3a9 294 if (req->r_reply) {
8921d114 295 ceph_msg_revoke_incoming(req->r_reply);
ab8cb34a 296 ceph_msg_put(req->r_reply);
ace6d3a9 297 }
0fff87ec 298
5476492f
AE
299 for (which = 0; which < req->r_num_ops; which++)
300 osd_req_op_data_release(req, which);
0fff87ec 301
415e49a9
SW
302 ceph_put_snap_context(req->r_snapc);
303 if (req->r_mempool)
304 mempool_free(req, req->r_osdc->req_mempool);
305 else
306 kfree(req);
f24e9980 307}
3d14c5d2 308EXPORT_SYMBOL(ceph_osdc_release_request);
68b4476b 309
3499e8a5 310struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
f24e9980 311 struct ceph_snap_context *snapc,
1b83bef2 312 unsigned int num_ops,
3499e8a5 313 bool use_mempool,
54a54007 314 gfp_t gfp_flags)
f24e9980
SW
315{
316 struct ceph_osd_request *req;
317 struct ceph_msg *msg;
1b83bef2
SW
318 size_t msg_size;
319
79528734
AE
320 BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX);
321 BUG_ON(num_ops > CEPH_OSD_MAX_OP);
322
1b83bef2
SW
 323 msg_size = 4 + 4 + 8 + 8 + 4 + 8;
324 msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
325 msg_size += 1 + 8 + 4 + 4; /* pg_t */
326 msg_size += 4 + MAX_OBJ_NAME_SIZE;
327 msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
328 msg_size += 8; /* snapid */
329 msg_size += 8; /* snap_seq */
330 msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
331 msg_size += 4;
f24e9980
SW
332
333 if (use_mempool) {
3499e8a5 334 req = mempool_alloc(osdc->req_mempool, gfp_flags);
f24e9980
SW
335 memset(req, 0, sizeof(*req));
336 } else {
3499e8a5 337 req = kzalloc(sizeof(*req), gfp_flags);
f24e9980
SW
338 }
339 if (req == NULL)
a79832f2 340 return NULL;
f24e9980 341
f24e9980
SW
342 req->r_osdc = osdc;
343 req->r_mempool = use_mempool;
79528734 344 req->r_num_ops = num_ops;
68b4476b 345
415e49a9 346 kref_init(&req->r_kref);
f24e9980
SW
347 init_completion(&req->r_completion);
348 init_completion(&req->r_safe_completion);
a978fa20 349 RB_CLEAR_NODE(&req->r_node);
f24e9980 350 INIT_LIST_HEAD(&req->r_unsafe_item);
a40c4f10
YS
351 INIT_LIST_HEAD(&req->r_linger_item);
352 INIT_LIST_HEAD(&req->r_linger_osd);
935b639a 353 INIT_LIST_HEAD(&req->r_req_lru_item);
cd43045c
SW
354 INIT_LIST_HEAD(&req->r_osd_item);
355
c16e7869
SW
356 /* create reply message */
357 if (use_mempool)
358 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
359 else
360 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
b61c2763 361 OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
a79832f2 362 if (!msg) {
c16e7869 363 ceph_osdc_put_request(req);
a79832f2 364 return NULL;
c16e7869
SW
365 }
366 req->r_reply = msg;
367
368 /* create request message; allow space for oid */
f24e9980 369 if (use_mempool)
8f3bc053 370 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
f24e9980 371 else
b61c2763 372 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
a79832f2 373 if (!msg) {
f24e9980 374 ceph_osdc_put_request(req);
a79832f2 375 return NULL;
f24e9980 376 }
68b4476b 377
f24e9980 378 memset(msg->front.iov_base, 0, msg->front.iov_len);
3499e8a5
YS
379
380 req->r_request = msg;
3499e8a5
YS
381
382 return req;
383}
3d14c5d2 384EXPORT_SYMBOL(ceph_osdc_alloc_request);
3499e8a5 385
a8dd0a37 386static bool osd_req_opcode_valid(u16 opcode)
68b4476b 387{
a8dd0a37 388 switch (opcode) {
68b4476b 389 case CEPH_OSD_OP_READ:
a8dd0a37 390 case CEPH_OSD_OP_STAT:
4c46459c
AE
391 case CEPH_OSD_OP_MAPEXT:
392 case CEPH_OSD_OP_MASKTRUNC:
393 case CEPH_OSD_OP_SPARSE_READ:
a9f36c3e 394 case CEPH_OSD_OP_NOTIFY:
a8dd0a37 395 case CEPH_OSD_OP_NOTIFY_ACK:
4c46459c 396 case CEPH_OSD_OP_ASSERT_VER:
a8dd0a37 397 case CEPH_OSD_OP_WRITE:
4c46459c
AE
398 case CEPH_OSD_OP_WRITEFULL:
399 case CEPH_OSD_OP_TRUNCATE:
400 case CEPH_OSD_OP_ZERO:
401 case CEPH_OSD_OP_DELETE:
402 case CEPH_OSD_OP_APPEND:
a8dd0a37 403 case CEPH_OSD_OP_STARTSYNC:
4c46459c
AE
404 case CEPH_OSD_OP_SETTRUNC:
405 case CEPH_OSD_OP_TRIMTRUNC:
406 case CEPH_OSD_OP_TMAPUP:
407 case CEPH_OSD_OP_TMAPPUT:
408 case CEPH_OSD_OP_TMAPGET:
409 case CEPH_OSD_OP_CREATE:
a9f36c3e 410 case CEPH_OSD_OP_ROLLBACK:
a8dd0a37 411 case CEPH_OSD_OP_WATCH:
4c46459c
AE
412 case CEPH_OSD_OP_OMAPGETKEYS:
413 case CEPH_OSD_OP_OMAPGETVALS:
414 case CEPH_OSD_OP_OMAPGETHEADER:
415 case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
4c46459c
AE
416 case CEPH_OSD_OP_OMAPSETVALS:
417 case CEPH_OSD_OP_OMAPSETHEADER:
418 case CEPH_OSD_OP_OMAPCLEAR:
419 case CEPH_OSD_OP_OMAPRMKEYS:
420 case CEPH_OSD_OP_OMAP_CMP:
421 case CEPH_OSD_OP_CLONERANGE:
422 case CEPH_OSD_OP_ASSERT_SRC_VERSION:
423 case CEPH_OSD_OP_SRC_CMPXATTR:
a9f36c3e 424 case CEPH_OSD_OP_GETXATTR:
4c46459c 425 case CEPH_OSD_OP_GETXATTRS:
a9f36c3e
AE
426 case CEPH_OSD_OP_CMPXATTR:
427 case CEPH_OSD_OP_SETXATTR:
4c46459c
AE
428 case CEPH_OSD_OP_SETXATTRS:
429 case CEPH_OSD_OP_RESETXATTRS:
430 case CEPH_OSD_OP_RMXATTR:
431 case CEPH_OSD_OP_PULL:
432 case CEPH_OSD_OP_PUSH:
433 case CEPH_OSD_OP_BALANCEREADS:
434 case CEPH_OSD_OP_UNBALANCEREADS:
435 case CEPH_OSD_OP_SCRUB:
436 case CEPH_OSD_OP_SCRUB_RESERVE:
437 case CEPH_OSD_OP_SCRUB_UNRESERVE:
438 case CEPH_OSD_OP_SCRUB_STOP:
439 case CEPH_OSD_OP_SCRUB_MAP:
440 case CEPH_OSD_OP_WRLOCK:
441 case CEPH_OSD_OP_WRUNLOCK:
442 case CEPH_OSD_OP_RDLOCK:
443 case CEPH_OSD_OP_RDUNLOCK:
444 case CEPH_OSD_OP_UPLOCK:
445 case CEPH_OSD_OP_DNLOCK:
a8dd0a37 446 case CEPH_OSD_OP_CALL:
4c46459c
AE
447 case CEPH_OSD_OP_PGLS:
448 case CEPH_OSD_OP_PGLS_FILTER:
a8dd0a37
AE
449 return true;
450 default:
451 return false;
452 }
453}
454
33803f33
AE
455/*
456 * This is an osd op init function for opcodes that have no data or
457 * other information associated with them. It also serves as a
458 * common init routine for all the other init functions, below.
459 */
c99d2d4a
AE
460static struct ceph_osd_req_op *
461osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
462 u16 opcode)
33803f33 463{
c99d2d4a
AE
464 struct ceph_osd_req_op *op;
465
466 BUG_ON(which >= osd_req->r_num_ops);
33803f33
AE
467 BUG_ON(!osd_req_opcode_valid(opcode));
468
c99d2d4a 469 op = &osd_req->r_ops[which];
33803f33 470 memset(op, 0, sizeof (*op));
33803f33 471 op->op = opcode;
c99d2d4a
AE
472
473 return op;
33803f33
AE
474}
475
c99d2d4a
AE
476void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
477 unsigned int which, u16 opcode,
33803f33
AE
478 u64 offset, u64 length,
479 u64 truncate_size, u32 truncate_seq)
480{
c99d2d4a 481 struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);
33803f33
AE
482 size_t payload_len = 0;
483
484 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);
485
33803f33
AE
486 op->extent.offset = offset;
487 op->extent.length = length;
488 op->extent.truncate_size = truncate_size;
489 op->extent.truncate_seq = truncate_seq;
490 if (opcode == CEPH_OSD_OP_WRITE)
491 payload_len += length;
492
493 op->payload_len = payload_len;
494}
495EXPORT_SYMBOL(osd_req_op_extent_init);
496
c99d2d4a
AE
497void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
498 unsigned int which, u64 length)
e5975c7c 499{
c99d2d4a
AE
500 struct ceph_osd_req_op *op;
501 u64 previous;
502
503 BUG_ON(which >= osd_req->r_num_ops);
504 op = &osd_req->r_ops[which];
505 previous = op->extent.length;
e5975c7c
AE
506
507 if (length == previous)
508 return; /* Nothing to do */
509 BUG_ON(length > previous);
510
511 op->extent.length = length;
512 op->payload_len -= previous - length;
513}
514EXPORT_SYMBOL(osd_req_op_extent_update);
515
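/*
 * Usage sketch (illustrative, not part of the original flow): set up a
 * single-op object read and point the reply's data at a page vector:
 *
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, objoff, objlen,
 *			       truncate_size, truncate_seq);
 *	osd_req_op_extent_osd_data_pages(req, 0, false, pages, objlen,
 *					 page_align, false, false);
 *
 * If the transfer later turns out shorter, osd_req_op_extent_update()
 * shrinks the op; per the BUG_ON above, the length may only decrease.
 */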
c99d2d4a 516void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
04017e29 517 u16 opcode, const char *class, const char *method)
33803f33 518{
c99d2d4a 519 struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);
5f562df5 520 struct ceph_pagelist *pagelist;
33803f33
AE
521 size_t payload_len = 0;
522 size_t size;
523
524 BUG_ON(opcode != CEPH_OSD_OP_CALL);
525
5f562df5
AE
526 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
527 BUG_ON(!pagelist);
528 ceph_pagelist_init(pagelist);
529
33803f33
AE
530 op->cls.class_name = class;
531 size = strlen(class);
532 BUG_ON(size > (size_t) U8_MAX);
533 op->cls.class_len = size;
5f562df5 534 ceph_pagelist_append(pagelist, class, size);
33803f33
AE
535 payload_len += size;
536
537 op->cls.method_name = method;
538 size = strlen(method);
539 BUG_ON(size > (size_t) U8_MAX);
540 op->cls.method_len = size;
5f562df5 541 ceph_pagelist_append(pagelist, method, size);
33803f33
AE
542 payload_len += size;
543
a4ce40a9 544 osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
5f562df5 545
33803f33
AE
546 op->cls.argc = 0; /* currently unused */
547
548 op->payload_len = payload_len;
549}
550EXPORT_SYMBOL(osd_req_op_cls_init);
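/*
 * Usage sketch for a class method call (illustrative; the class and
 * method names here are hypothetical):
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "myclass", "mymethod");
 *	osd_req_op_cls_request_data_pagelist(req, 0, in_pagelist);
 *	osd_req_op_cls_response_data_pages(req, 0, reply_pages, PAGE_SIZE,
 *					   0, false, false);
 */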
8c042b0d 551
c99d2d4a
AE
552void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
553 unsigned int which, u16 opcode,
33803f33
AE
554 u64 cookie, u64 version, int flag)
555{
c99d2d4a 556 struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode);
33803f33 557
c99d2d4a 558 BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);
33803f33
AE
559
560 op->watch.cookie = cookie;
561 /* op->watch.ver = version; */ /* XXX 3847 */
562 op->watch.ver = cpu_to_le64(version);
563 if (opcode == CEPH_OSD_OP_WATCH && flag)
c99d2d4a 564 op->watch.flag = (u8)1;
33803f33
AE
565}
566EXPORT_SYMBOL(osd_req_op_watch_init);
567
90af3602 568static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
ec9123c5
AE
569 struct ceph_osd_data *osd_data)
570{
571 u64 length = ceph_osd_data_length(osd_data);
572
573 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
574 BUG_ON(length > (u64) SIZE_MAX);
575 if (length)
90af3602 576 ceph_msg_data_add_pages(msg, osd_data->pages,
ec9123c5
AE
577 length, osd_data->alignment);
578 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
579 BUG_ON(!length);
90af3602 580 ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
ec9123c5
AE
581#ifdef CONFIG_BLOCK
582 } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
90af3602 583 ceph_msg_data_add_bio(msg, osd_data->bio, length);
ec9123c5
AE
584#endif
585 } else {
586 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
587 }
588}
589
a8dd0a37 590static u64 osd_req_encode_op(struct ceph_osd_request *req,
79528734 591 struct ceph_osd_op *dst, unsigned int which)
a8dd0a37 592{
79528734 593 struct ceph_osd_req_op *src;
04017e29 594 struct ceph_osd_data *osd_data;
54d50649 595 u64 request_data_len = 0;
04017e29 596 u64 data_length;
a8dd0a37 597
79528734
AE
598 BUG_ON(which >= req->r_num_ops);
599 src = &req->r_ops[which];
a8dd0a37
AE
600 if (WARN_ON(!osd_req_opcode_valid(src->op))) {
601 pr_err("unrecognized osd opcode %d\n", src->op);
602
603 return 0;
604 }
605
606 switch (src->op) {
607 case CEPH_OSD_OP_STAT:
608 break;
609 case CEPH_OSD_OP_READ:
610 case CEPH_OSD_OP_WRITE:
611 if (src->op == CEPH_OSD_OP_WRITE)
54d50649 612 request_data_len = src->extent.length;
a8dd0a37
AE
613 dst->extent.offset = cpu_to_le64(src->extent.offset);
614 dst->extent.length = cpu_to_le64(src->extent.length);
615 dst->extent.truncate_size =
616 cpu_to_le64(src->extent.truncate_size);
617 dst->extent.truncate_seq =
618 cpu_to_le32(src->extent.truncate_seq);
04017e29 619 osd_data = &src->extent.osd_data;
5476492f 620 if (src->op == CEPH_OSD_OP_WRITE)
04017e29 621 ceph_osdc_msg_data_add(req->r_request, osd_data);
5476492f 622 else
04017e29 623 ceph_osdc_msg_data_add(req->r_reply, osd_data);
a8dd0a37
AE
624 break;
625 case CEPH_OSD_OP_CALL:
a8dd0a37
AE
626 dst->cls.class_len = src->cls.class_len;
627 dst->cls.method_len = src->cls.method_len;
04017e29
AE
628 osd_data = &src->cls.request_info;
629 ceph_osdc_msg_data_add(req->r_request, osd_data);
630 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST);
631 request_data_len = osd_data->pagelist->length;
632
633 osd_data = &src->cls.request_data;
634 data_length = ceph_osd_data_length(osd_data);
635 if (data_length) {
636 BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
637 dst->cls.indata_len = cpu_to_le32(data_length);
638 ceph_osdc_msg_data_add(req->r_request, osd_data);
639 src->payload_len += data_length;
640 request_data_len += data_length;
641 }
642 osd_data = &src->cls.response_data;
643 ceph_osdc_msg_data_add(req->r_reply, osd_data);
a8dd0a37
AE
644 break;
645 case CEPH_OSD_OP_STARTSYNC:
646 break;
647 case CEPH_OSD_OP_NOTIFY_ACK:
648 case CEPH_OSD_OP_WATCH:
649 dst->watch.cookie = cpu_to_le64(src->watch.cookie);
650 dst->watch.ver = cpu_to_le64(src->watch.ver);
651 dst->watch.flag = src->watch.flag;
652 break;
653 default:
4c46459c 654 pr_err("unsupported osd opcode %s\n",
8f63ca2d 655 ceph_osd_op_name(src->op));
4c46459c 656 WARN_ON(1);
a8dd0a37
AE
657
658 return 0;
68b4476b 659 }
a8dd0a37 660 dst->op = cpu_to_le16(src->op);
68b4476b 661 dst->payload_len = cpu_to_le32(src->payload_len);
175face2 662
54d50649 663 return request_data_len;
68b4476b
YS
664}
665
3499e8a5
YS
666/*
667 * build new request AND message, calculate layout, and adjust file
668 * extent as needed.
669 *
670 * if the file was recently truncated, we include information about its
671 * old and new size so that the object can be updated appropriately. (we
672 * avoid synchronously deleting truncated objects because it's slow.)
673 *
 674 * if @num_ops > 1, a second 'startsync' op is included so that the osd
 675 * will flush data quickly.
676 */
677struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
678 struct ceph_file_layout *layout,
679 struct ceph_vino vino,
acead002 680 u64 off, u64 *plen, int num_ops,
3499e8a5
YS
681 int opcode, int flags,
682 struct ceph_snap_context *snapc,
3499e8a5
YS
683 u32 truncate_seq,
684 u64 truncate_size,
153e5167 685 bool use_mempool)
3499e8a5 686{
68b4476b 687 struct ceph_osd_request *req;
75d1c941
AE
688 u64 objnum = 0;
689 u64 objoff = 0;
690 u64 objlen = 0;
d18d1e28
AE
691 u32 object_size;
692 u64 object_base;
6816282d 693 int r;
68b4476b 694
d18d1e28 695 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);
68b4476b 696
acead002 697 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
ae7ca4a3 698 GFP_NOFS);
4ad12621 699 if (!req)
6816282d 700 return ERR_PTR(-ENOMEM);
79528734 701
d178a9e7 702 req->r_flags = flags;
3499e8a5
YS
703
704 /* calculate max write size */
a19dadfb 705 r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
3ff5f385
AE
706 if (r < 0) {
707 ceph_osdc_put_request(req);
6816282d 708 return ERR_PTR(r);
3ff5f385 709 }
a19dadfb 710
d18d1e28
AE
711 object_size = le32_to_cpu(layout->fl_object_size);
712 object_base = off - objoff;
713 if (truncate_size <= object_base) {
714 truncate_size = 0;
715 } else {
716 truncate_size -= object_base;
717 if (truncate_size > object_size)
718 truncate_size = object_size;
a19dadfb 719 }
d18d1e28 720
c99d2d4a 721 osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
b0270324 722 truncate_size, truncate_seq);
8c042b0d 723
acead002
AE
724 /*
725 * A second op in the ops array means the caller wants to
 726 * also include a 'startsync' command so that the
727 * osd will flush data quickly.
728 */
729 if (num_ops > 1)
c99d2d4a 730 osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC);
d18d1e28 731
3499e8a5
YS
732 req->r_file_layout = *layout; /* keep a copy */
733
75d1c941
AE
734 snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx",
735 vino.ino, objnum);
dbe0fc41
AE
736 req->r_oid_len = strlen(req->r_oid);
737
f24e9980
SW
738 return req;
739}
3d14c5d2 740EXPORT_SYMBOL(ceph_osdc_new_request);
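/*
 * Usage sketch (illustrative, modeled on the synchronous read path):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, truncate_seq, truncate_size,
 *				    false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	osd_req_op_extent_osd_data_pages(req, 0, false, pages, len,
 *					 page_align, false, false);
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */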
f24e9980
SW
741
742/*
743 * We keep osd requests in an rbtree, sorted by ->r_tid.
744 */
745static void __insert_request(struct ceph_osd_client *osdc,
746 struct ceph_osd_request *new)
747{
748 struct rb_node **p = &osdc->requests.rb_node;
749 struct rb_node *parent = NULL;
750 struct ceph_osd_request *req = NULL;
751
752 while (*p) {
753 parent = *p;
754 req = rb_entry(parent, struct ceph_osd_request, r_node);
755 if (new->r_tid < req->r_tid)
756 p = &(*p)->rb_left;
757 else if (new->r_tid > req->r_tid)
758 p = &(*p)->rb_right;
759 else
760 BUG();
761 }
762
763 rb_link_node(&new->r_node, parent, p);
764 rb_insert_color(&new->r_node, &osdc->requests);
765}
766
767static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
768 u64 tid)
769{
770 struct ceph_osd_request *req;
771 struct rb_node *n = osdc->requests.rb_node;
772
773 while (n) {
774 req = rb_entry(n, struct ceph_osd_request, r_node);
775 if (tid < req->r_tid)
776 n = n->rb_left;
777 else if (tid > req->r_tid)
778 n = n->rb_right;
779 else
780 return req;
781 }
782 return NULL;
783}
784
785static struct ceph_osd_request *
786__lookup_request_ge(struct ceph_osd_client *osdc,
787 u64 tid)
788{
789 struct ceph_osd_request *req;
790 struct rb_node *n = osdc->requests.rb_node;
791
792 while (n) {
793 req = rb_entry(n, struct ceph_osd_request, r_node);
794 if (tid < req->r_tid) {
795 if (!n->rb_left)
796 return req;
797 n = n->rb_left;
798 } else if (tid > req->r_tid) {
799 n = n->rb_right;
800 } else {
801 return req;
802 }
803 }
804 return NULL;
805}
806
6f6c7006
SW
807/*
808 * Resubmit requests pending on the given osd.
809 */
810static void __kick_osd_requests(struct ceph_osd_client *osdc,
811 struct ceph_osd *osd)
812{
a40c4f10 813 struct ceph_osd_request *req, *nreq;
e02493c0 814 LIST_HEAD(resend);
6f6c7006
SW
815 int err;
816
817 dout("__kick_osd_requests osd%d\n", osd->o_osd);
818 err = __reset_osd(osdc, osd);
685a7555 819 if (err)
6f6c7006 820 return;
e02493c0
AE
821 /*
822 * Build up a list of requests to resend by traversing the
823 * osd's list of requests. Requests for a given object are
824 * sent in tid order, and that is also the order they're
825 * kept on this list. Therefore all requests that are in
826 * flight will be found first, followed by all requests that
827 * have not yet been sent. And to resend requests while
828 * preserving this order we will want to put any sent
829 * requests back on the front of the osd client's unsent
830 * list.
831 *
832 * So we build a separate ordered list of already-sent
833 * requests for the affected osd and splice it onto the
834 * front of the osd client's unsent list. Once we've seen a
835 * request that has not yet been sent we're done. Those
836 * requests are already sitting right where they belong.
837 */
6f6c7006 838 list_for_each_entry(req, &osd->o_requests, r_osd_item) {
e02493c0
AE
839 if (!req->r_sent)
840 break;
841 list_move_tail(&req->r_req_lru_item, &resend);
842 dout("requeueing %p tid %llu osd%d\n", req, req->r_tid,
6f6c7006 843 osd->o_osd);
a40c4f10
YS
844 if (!req->r_linger)
845 req->r_flags |= CEPH_OSD_FLAG_RETRY;
846 }
e02493c0 847 list_splice(&resend, &osdc->req_unsent);
a40c4f10 848
e02493c0
AE
849 /*
850 * Linger requests are re-registered before sending, which
851 * sets up a new tid for each. We add them to the unsent
852 * list at the end to keep things in tid order.
853 */
a40c4f10
YS
854 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
855 r_linger_osd) {
77f38e0e
SW
856 /*
857 * reregister request prior to unregistering linger so
858 * that r_osd is preserved.
859 */
860 BUG_ON(!list_empty(&req->r_req_lru_item));
a40c4f10 861 __register_request(osdc, req);
e02493c0 862 list_add_tail(&req->r_req_lru_item, &osdc->req_unsent);
ad885927 863 list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
77f38e0e 864 __unregister_linger_request(osdc, req);
a40c4f10
YS
865 dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
866 osd->o_osd);
6f6c7006
SW
867 }
868}
869
f24e9980 870/*
81b024e7 871 * If the osd connection drops, we need to resubmit all requests.
f24e9980
SW
872 */
873static void osd_reset(struct ceph_connection *con)
874{
875 struct ceph_osd *osd = con->private;
876 struct ceph_osd_client *osdc;
877
878 if (!osd)
879 return;
880 dout("osd_reset osd%d\n", osd->o_osd);
881 osdc = osd->o_osdc;
f24e9980 882 down_read(&osdc->map_sem);
83aff95e
SW
883 mutex_lock(&osdc->request_mutex);
884 __kick_osd_requests(osdc, osd);
f9d25199 885 __send_queued(osdc);
83aff95e 886 mutex_unlock(&osdc->request_mutex);
f24e9980
SW
887 up_read(&osdc->map_sem);
888}
889
890/*
891 * Track open sessions with osds.
892 */
e10006f8 893static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
f24e9980
SW
894{
895 struct ceph_osd *osd;
896
897 osd = kzalloc(sizeof(*osd), GFP_NOFS);
898 if (!osd)
899 return NULL;
900
901 atomic_set(&osd->o_ref, 1);
902 osd->o_osdc = osdc;
e10006f8 903 osd->o_osd = onum;
f407731d 904 RB_CLEAR_NODE(&osd->o_node);
f24e9980 905 INIT_LIST_HEAD(&osd->o_requests);
a40c4f10 906 INIT_LIST_HEAD(&osd->o_linger_requests);
f5a2041b 907 INIT_LIST_HEAD(&osd->o_osd_lru);
f24e9980
SW
908 osd->o_incarnation = 1;
909
b7a9e5dd 910 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
4e7a5dcd 911
422d2cb8 912 INIT_LIST_HEAD(&osd->o_keepalive_item);
f24e9980
SW
913 return osd;
914}
915
916static struct ceph_osd *get_osd(struct ceph_osd *osd)
917{
918 if (atomic_inc_not_zero(&osd->o_ref)) {
919 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
920 atomic_read(&osd->o_ref));
921 return osd;
922 } else {
923 dout("get_osd %p FAIL\n", osd);
924 return NULL;
925 }
926}
927
928static void put_osd(struct ceph_osd *osd)
929{
930 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
931 atomic_read(&osd->o_ref) - 1);
a255651d 932 if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
79494d1b
SW
933 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
934
27859f97 935 ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
f24e9980 936 kfree(osd);
79494d1b 937 }
f24e9980
SW
938}
939
940/*
941 * remove an osd from our map
942 */
f5a2041b 943static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
f24e9980 944{
f5a2041b 945 dout("__remove_osd %p\n", osd);
f24e9980
SW
946 BUG_ON(!list_empty(&osd->o_requests));
947 rb_erase(&osd->o_node, &osdc->osds);
f5a2041b 948 list_del_init(&osd->o_osd_lru);
f24e9980
SW
949 ceph_con_close(&osd->o_con);
950 put_osd(osd);
951}
952
aca420bc
SW
953static void remove_all_osds(struct ceph_osd_client *osdc)
954{
048a9d2d 955 dout("%s %p\n", __func__, osdc);
aca420bc
SW
956 mutex_lock(&osdc->request_mutex);
957 while (!RB_EMPTY_ROOT(&osdc->osds)) {
958 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
959 struct ceph_osd, o_node);
960 __remove_osd(osdc, osd);
961 }
962 mutex_unlock(&osdc->request_mutex);
963}
964
f5a2041b
YS
965static void __move_osd_to_lru(struct ceph_osd_client *osdc,
966 struct ceph_osd *osd)
967{
968 dout("__move_osd_to_lru %p\n", osd);
969 BUG_ON(!list_empty(&osd->o_osd_lru));
970 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
3d14c5d2 971 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
f5a2041b
YS
972}
973
974static void __remove_osd_from_lru(struct ceph_osd *osd)
975{
976 dout("__remove_osd_from_lru %p\n", osd);
977 if (!list_empty(&osd->o_osd_lru))
978 list_del_init(&osd->o_osd_lru);
979}
980
aca420bc 981static void remove_old_osds(struct ceph_osd_client *osdc)
f5a2041b
YS
982{
983 struct ceph_osd *osd, *nosd;
984
985 dout("__remove_old_osds %p\n", osdc);
986 mutex_lock(&osdc->request_mutex);
987 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
aca420bc 988 if (time_before(jiffies, osd->lru_ttl))
f5a2041b
YS
989 break;
990 __remove_osd(osdc, osd);
991 }
992 mutex_unlock(&osdc->request_mutex);
993}
994
f24e9980
SW
995/*
996 * reset osd connect
997 */
f5a2041b 998static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
f24e9980 999{
c3acb181 1000 struct ceph_entity_addr *peer_addr;
f24e9980 1001
f5a2041b 1002 dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
a40c4f10
YS
1003 if (list_empty(&osd->o_requests) &&
1004 list_empty(&osd->o_linger_requests)) {
f5a2041b 1005 __remove_osd(osdc, osd);
c3acb181
AE
1006
1007 return -ENODEV;
1008 }
1009
1010 peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
1011 if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
1012 !ceph_con_opened(&osd->o_con)) {
1013 struct ceph_osd_request *req;
1014
87b315a5
SW
1015 dout(" osd addr hasn't changed and connection never opened,"
1016 " letting msgr retry");
 1017 /* touch each r_stamp for handle_timeout()'s benefit */
1018 list_for_each_entry(req, &osd->o_requests, r_osd_item)
1019 req->r_stamp = jiffies;
c3acb181
AE
1020
1021 return -EAGAIN;
f24e9980 1022 }
c3acb181
AE
1023
1024 ceph_con_close(&osd->o_con);
1025 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1026 osd->o_incarnation++;
1027
1028 return 0;
f24e9980
SW
1029}
1030
1031static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
1032{
1033 struct rb_node **p = &osdc->osds.rb_node;
1034 struct rb_node *parent = NULL;
1035 struct ceph_osd *osd = NULL;
1036
aca420bc 1037 dout("__insert_osd %p osd%d\n", new, new->o_osd);
f24e9980
SW
1038 while (*p) {
1039 parent = *p;
1040 osd = rb_entry(parent, struct ceph_osd, o_node);
1041 if (new->o_osd < osd->o_osd)
1042 p = &(*p)->rb_left;
1043 else if (new->o_osd > osd->o_osd)
1044 p = &(*p)->rb_right;
1045 else
1046 BUG();
1047 }
1048
1049 rb_link_node(&new->o_node, parent, p);
1050 rb_insert_color(&new->o_node, &osdc->osds);
1051}
1052
1053static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
1054{
1055 struct ceph_osd *osd;
1056 struct rb_node *n = osdc->osds.rb_node;
1057
1058 while (n) {
1059 osd = rb_entry(n, struct ceph_osd, o_node);
1060 if (o < osd->o_osd)
1061 n = n->rb_left;
1062 else if (o > osd->o_osd)
1063 n = n->rb_right;
1064 else
1065 return osd;
1066 }
1067 return NULL;
1068}
1069
422d2cb8
YS
1070static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
1071{
1072 schedule_delayed_work(&osdc->timeout_work,
3d14c5d2 1073 osdc->client->options->osd_keepalive_timeout * HZ);
422d2cb8
YS
1074}
1075
1076static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
1077{
1078 cancel_delayed_work(&osdc->timeout_work);
1079}
f24e9980
SW
1080
1081/*
1082 * Register request, assign tid. If this is the first request, set up
1083 * the timeout event.
1084 */
a40c4f10
YS
1085static void __register_request(struct ceph_osd_client *osdc,
1086 struct ceph_osd_request *req)
f24e9980 1087{
f24e9980 1088 req->r_tid = ++osdc->last_tid;
6df058c0 1089 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
77f38e0e 1090 dout("__register_request %p tid %lld\n", req, req->r_tid);
f24e9980
SW
1091 __insert_request(osdc, req);
1092 ceph_osdc_get_request(req);
1093 osdc->num_requests++;
f24e9980 1094 if (osdc->num_requests == 1) {
422d2cb8
YS
1095 dout(" first request, scheduling timeout\n");
1096 __schedule_osd_timeout(osdc);
f24e9980 1097 }
a40c4f10
YS
1098}
1099
f24e9980
SW
1100/*
1101 * called under osdc->request_mutex
1102 */
1103static void __unregister_request(struct ceph_osd_client *osdc,
1104 struct ceph_osd_request *req)
1105{
35f9f8a0
SW
1106 if (RB_EMPTY_NODE(&req->r_node)) {
1107 dout("__unregister_request %p tid %lld not registered\n",
1108 req, req->r_tid);
1109 return;
1110 }
1111
f24e9980
SW
1112 dout("__unregister_request %p tid %lld\n", req, req->r_tid);
1113 rb_erase(&req->r_node, &osdc->requests);
1114 osdc->num_requests--;
1115
0ba6478d
SW
1116 if (req->r_osd) {
1117 /* make sure the original request isn't in flight. */
6740a845 1118 ceph_msg_revoke(req->r_request);
0ba6478d
SW
1119
1120 list_del_init(&req->r_osd_item);
a40c4f10
YS
1121 if (list_empty(&req->r_osd->o_requests) &&
1122 list_empty(&req->r_osd->o_linger_requests)) {
1123 dout("moving osd to %p lru\n", req->r_osd);
f5a2041b 1124 __move_osd_to_lru(osdc, req->r_osd);
a40c4f10 1125 }
fbdb9190 1126 if (list_empty(&req->r_linger_item))
a40c4f10 1127 req->r_osd = NULL;
0ba6478d 1128 }
f24e9980 1129
7d5f2481 1130 list_del_init(&req->r_req_lru_item);
f24e9980
SW
1131 ceph_osdc_put_request(req);
1132
422d2cb8
YS
1133 if (osdc->num_requests == 0) {
1134 dout(" no requests, canceling timeout\n");
1135 __cancel_osd_timeout(osdc);
f24e9980
SW
1136 }
1137}
1138
1139/*
1140 * Cancel a previously queued request message
1141 */
1142static void __cancel_request(struct ceph_osd_request *req)
1143{
6bc18876 1144 if (req->r_sent && req->r_osd) {
6740a845 1145 ceph_msg_revoke(req->r_request);
f24e9980
SW
1146 req->r_sent = 0;
1147 }
1148}
1149
a40c4f10
YS
1150static void __register_linger_request(struct ceph_osd_client *osdc,
1151 struct ceph_osd_request *req)
1152{
1153 dout("__register_linger_request %p\n", req);
1154 list_add_tail(&req->r_linger_item, &osdc->req_linger);
6194ea89
SW
1155 if (req->r_osd)
1156 list_add_tail(&req->r_linger_osd,
1157 &req->r_osd->o_linger_requests);
a40c4f10
YS
1158}
1159
1160static void __unregister_linger_request(struct ceph_osd_client *osdc,
1161 struct ceph_osd_request *req)
1162{
1163 dout("__unregister_linger_request %p\n", req);
61c74035 1164 list_del_init(&req->r_linger_item);
a40c4f10 1165 if (req->r_osd) {
a40c4f10
YS
1166 list_del_init(&req->r_linger_osd);
1167
1168 if (list_empty(&req->r_osd->o_requests) &&
1169 list_empty(&req->r_osd->o_linger_requests)) {
1170 dout("moving osd to %p lru\n", req->r_osd);
1171 __move_osd_to_lru(osdc, req->r_osd);
1172 }
fbdb9190
SW
1173 if (list_empty(&req->r_osd_item))
1174 req->r_osd = NULL;
a40c4f10
YS
1175 }
1176}
1177
1178void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
1179 struct ceph_osd_request *req)
1180{
1181 mutex_lock(&osdc->request_mutex);
1182 if (req->r_linger) {
1183 __unregister_linger_request(osdc, req);
1184 ceph_osdc_put_request(req);
1185 }
1186 mutex_unlock(&osdc->request_mutex);
1187}
1188EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);
1189
1190void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
1191 struct ceph_osd_request *req)
1192{
1193 if (!req->r_linger) {
1194 dout("set_request_linger %p\n", req);
1195 req->r_linger = 1;
1196 /*
1197 * caller is now responsible for calling
1198 * unregister_linger_request
1199 */
1200 ceph_osdc_get_request(req);
1201 }
1202}
1203EXPORT_SYMBOL(ceph_osdc_set_request_linger);
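/*
 * Illustrative note: watch requests are the main users of lingering.
 * A caller typically marks the request with
 * ceph_osdc_set_request_linger() before starting it, and must later
 * pair that with ceph_osdc_unregister_linger_request() to drop the
 * extra reference taken here.
 */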
1204
f24e9980
SW
1205/*
1206 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
1207 * (as needed), and set the request r_osd appropriately. If there is
25985edc 1208 * no up osd, set r_osd to NULL. Move the request to the appropriate list
6f6c7006 1209 * (unsent, homeless) or leave on in-flight lru.
f24e9980
SW
1210 *
1211 * Return 0 if unchanged, 1 if changed, or negative on error.
1212 *
1213 * Caller should hold map_sem for read and request_mutex.
1214 */
6f6c7006 1215static int __map_request(struct ceph_osd_client *osdc,
38d6453c 1216 struct ceph_osd_request *req, int force_resend)
f24e9980 1217{
5b191d99 1218 struct ceph_pg pgid;
d85b7056
SW
1219 int acting[CEPH_PG_MAX_SIZE];
1220 int o = -1, num = 0;
f24e9980 1221 int err;
f24e9980 1222
6f6c7006 1223 dout("map_request %p tid %lld\n", req, req->r_tid);
41766f87
AE
1224 err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
1225 ceph_file_layout_pg_pool(req->r_file_layout));
6f6c7006
SW
1226 if (err) {
1227 list_move(&req->r_req_lru_item, &osdc->req_notarget);
f24e9980 1228 return err;
6f6c7006 1229 }
7740a42f
SW
1230 req->r_pgid = pgid;
1231
d85b7056
SW
1232 err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
1233 if (err > 0) {
1234 o = acting[0];
1235 num = err;
1236 }
f24e9980 1237
38d6453c
SW
1238 if ((!force_resend &&
1239 req->r_osd && req->r_osd->o_osd == o &&
d85b7056
SW
1240 req->r_sent >= req->r_osd->o_incarnation &&
1241 req->r_num_pg_osds == num &&
1242 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
f24e9980
SW
1243 (req->r_osd == NULL && o == -1))
1244 return 0; /* no change */
1245
5b191d99
SW
1246 dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
1247 req->r_tid, pgid.pool, pgid.seed, o,
f24e9980
SW
1248 req->r_osd ? req->r_osd->o_osd : -1);
1249
d85b7056
SW
1250 /* record full pg acting set */
1251 memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
1252 req->r_num_pg_osds = num;
1253
f24e9980
SW
1254 if (req->r_osd) {
1255 __cancel_request(req);
1256 list_del_init(&req->r_osd_item);
f24e9980
SW
1257 req->r_osd = NULL;
1258 }
1259
1260 req->r_osd = __lookup_osd(osdc, o);
1261 if (!req->r_osd && o >= 0) {
c99eb1c7 1262 err = -ENOMEM;
e10006f8 1263 req->r_osd = create_osd(osdc, o);
6f6c7006
SW
1264 if (!req->r_osd) {
1265 list_move(&req->r_req_lru_item, &osdc->req_notarget);
c99eb1c7 1266 goto out;
6f6c7006 1267 }
f24e9980 1268
6f6c7006 1269 dout("map_request osd %p is osd%d\n", req->r_osd, o);
f24e9980
SW
1270 __insert_osd(osdc, req->r_osd);
1271
b7a9e5dd
SW
1272 ceph_con_open(&req->r_osd->o_con,
1273 CEPH_ENTITY_TYPE_OSD, o,
1274 &osdc->osdmap->osd_addr[o]);
f24e9980
SW
1275 }
1276
f5a2041b
YS
1277 if (req->r_osd) {
1278 __remove_osd_from_lru(req->r_osd);
ad885927
AE
1279 list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
1280 list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
6f6c7006 1281 } else {
ad885927 1282 list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
f5a2041b 1283 }
d85b7056 1284 err = 1; /* osd or pg changed */
f24e9980
SW
1285
1286out:
f24e9980
SW
1287 return err;
1288}
1289
1290/*
1291 * caller should hold map_sem (for read) and request_mutex
1292 */
56e925b6
SW
1293static void __send_request(struct ceph_osd_client *osdc,
1294 struct ceph_osd_request *req)
f24e9980 1295{
1b83bef2 1296 void *p;
f24e9980 1297
1b83bef2
SW
1298 dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
1299 req, req->r_tid, req->r_osd->o_osd, req->r_flags,
1300 (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);
1301
1302 /* fill in message content that changes each time we send it */
1303 put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
1304 put_unaligned_le32(req->r_flags, req->r_request_flags);
1305 put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
1306 p = req->r_request_pgid;
1307 ceph_encode_64(&p, req->r_pgid.pool);
1308 ceph_encode_32(&p, req->r_pgid.seed);
1309 put_unaligned_le64(1, req->r_request_attempts); /* FIXME */
1310 memcpy(req->r_request_reassert_version, &req->r_reassert_version,
1311 sizeof(req->r_reassert_version));
2169aea6 1312
3dd72fc0 1313 req->r_stamp = jiffies;
07a27e22 1314 list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
f24e9980
SW
1315
1316 ceph_msg_get(req->r_request); /* send consumes a ref */
26be8808
AE
1317
 1318 /* Mark the request unsafe if this is the first time it's being sent. */
1319
1320 if (!req->r_sent && req->r_unsafe_callback)
1321 req->r_unsafe_callback(req, true);
f24e9980 1322 req->r_sent = req->r_osd->o_incarnation;
26be8808
AE
1323
1324 ceph_con_send(&req->r_osd->o_con, req->r_request);
f24e9980
SW
1325}
1326
6f6c7006
SW
1327/*
1328 * Send any requests in the queue (req_unsent).
1329 */
f9d25199 1330static void __send_queued(struct ceph_osd_client *osdc)
6f6c7006
SW
1331{
1332 struct ceph_osd_request *req, *tmp;
1333
f9d25199
AE
1334 dout("__send_queued\n");
1335 list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
6f6c7006 1336 __send_request(osdc, req);
6f6c7006
SW
1337}
1338
f24e9980
SW
1339/*
1340 * Timeout callback, called every N seconds when 1 or more osd
1341 * requests has been active for more than N seconds. When this
1342 * happens, we ping all OSDs with requests who have timed out to
1343 * ensure any communications channel reset is detected. Reset the
1344 * request timeouts another N seconds in the future as we go.
1345 * Reschedule the timeout event another N seconds in future (unless
1346 * there are no open requests).
1347 */
1348static void handle_timeout(struct work_struct *work)
1349{
1350 struct ceph_osd_client *osdc =
1351 container_of(work, struct ceph_osd_client, timeout_work.work);
83aff95e 1352 struct ceph_osd_request *req;
f24e9980 1353 struct ceph_osd *osd;
422d2cb8 1354 unsigned long keepalive =
3d14c5d2 1355 osdc->client->options->osd_keepalive_timeout * HZ;
422d2cb8 1356 struct list_head slow_osds;
f24e9980
SW
1357 dout("timeout\n");
1358 down_read(&osdc->map_sem);
1359
1360 ceph_monc_request_next_osdmap(&osdc->client->monc);
1361
1362 mutex_lock(&osdc->request_mutex);
f24e9980 1363
422d2cb8
YS
1364 /*
1365 * ping osds that are a bit slow. this ensures that if there
1366 * is a break in the TCP connection we will notice, and reopen
1367 * a connection with that osd (from the fault callback).
1368 */
1369 INIT_LIST_HEAD(&slow_osds);
1370 list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
3dd72fc0 1371 if (time_before(jiffies, req->r_stamp + keepalive))
422d2cb8
YS
1372 break;
1373
1374 osd = req->r_osd;
1375 BUG_ON(!osd);
1376 dout(" tid %llu is slow, will send keepalive on osd%d\n",
f24e9980 1377 req->r_tid, osd->o_osd);
422d2cb8
YS
1378 list_move_tail(&osd->o_keepalive_item, &slow_osds);
1379 }
1380 while (!list_empty(&slow_osds)) {
1381 osd = list_entry(slow_osds.next, struct ceph_osd,
1382 o_keepalive_item);
1383 list_del_init(&osd->o_keepalive_item);
f24e9980
SW
1384 ceph_con_keepalive(&osd->o_con);
1385 }
1386
422d2cb8 1387 __schedule_osd_timeout(osdc);
f9d25199 1388 __send_queued(osdc);
f24e9980 1389 mutex_unlock(&osdc->request_mutex);
f24e9980
SW
1390 up_read(&osdc->map_sem);
1391}
1392
f5a2041b
YS
1393static void handle_osds_timeout(struct work_struct *work)
1394{
1395 struct ceph_osd_client *osdc =
1396 container_of(work, struct ceph_osd_client,
1397 osds_timeout_work.work);
1398 unsigned long delay =
3d14c5d2 1399 osdc->client->options->osd_idle_ttl * HZ >> 2;
f5a2041b
YS
1400
1401 dout("osds timeout\n");
1402 down_read(&osdc->map_sem);
aca420bc 1403 remove_old_osds(osdc);
f5a2041b
YS
1404 up_read(&osdc->map_sem);
1405
1406 schedule_delayed_work(&osdc->osds_timeout_work,
1407 round_jiffies_relative(delay));
1408}
1409
25845472
SW
1410static void complete_request(struct ceph_osd_request *req)
1411{
26be8808
AE
1412 if (req->r_unsafe_callback)
1413 req->r_unsafe_callback(req, false);
25845472
SW
1414 complete_all(&req->r_safe_completion); /* fsync waiter */
1415}
1416
f24e9980
SW
1417/*
1418 * handle osd op reply. either call the callback if it is specified,
1419 * or do the completion to wake up the waiting thread.
1420 */
350b1c32
SW
1421static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1422 struct ceph_connection *con)
f24e9980 1423{
1b83bef2 1424 void *p, *end;
f24e9980
SW
1425 struct ceph_osd_request *req;
1426 u64 tid;
1b83bef2 1427 int object_len;
79528734
AE
1428 unsigned int numops;
1429 int payload_len, flags;
0ceed5db 1430 s32 result;
1b83bef2
SW
1431 s32 retry_attempt;
1432 struct ceph_pg pg;
1433 int err;
1434 u32 reassert_epoch;
1435 u64 reassert_version;
1436 u32 osdmap_epoch;
0d5af164 1437 int already_completed;
9fc6e064 1438 u32 bytes;
79528734 1439 unsigned int i;
f24e9980 1440
6df058c0 1441 tid = le64_to_cpu(msg->hdr.tid);
1b83bef2
SW
1442 dout("handle_reply %p tid %llu\n", msg, tid);
1443
1444 p = msg->front.iov_base;
1445 end = p + msg->front.iov_len;
1446
1447 ceph_decode_need(&p, end, 4, bad);
1448 object_len = ceph_decode_32(&p);
1449 ceph_decode_need(&p, end, object_len, bad);
1450 p += object_len;
1451
ef4859d6 1452 err = ceph_decode_pgid(&p, end, &pg);
1b83bef2 1453 if (err)
f24e9980 1454 goto bad;
1b83bef2
SW
1455
1456 ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
1457 flags = ceph_decode_64(&p);
1458 result = ceph_decode_32(&p);
1459 reassert_epoch = ceph_decode_32(&p);
1460 reassert_version = ceph_decode_64(&p);
1461 osdmap_epoch = ceph_decode_32(&p);
1462
f24e9980
SW
1463 /* lookup */
1464 mutex_lock(&osdc->request_mutex);
1465 req = __lookup_request(osdc, tid);
1466 if (req == NULL) {
1467 dout("handle_reply tid %llu dne\n", tid);
8058fd45 1468 goto bad_mutex;
f24e9980
SW
1469 }
1470 ceph_osdc_get_request(req);
1b83bef2
SW
1471
1472 dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
1473 req, result);
1474
1475 ceph_decode_need(&p, end, 4, bad);
1476 numops = ceph_decode_32(&p);
1477 if (numops > CEPH_OSD_MAX_OP)
1478 goto bad_put;
1479 if (numops != req->r_num_ops)
1480 goto bad_put;
1481 payload_len = 0;
1482 ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
1483 for (i = 0; i < numops; i++) {
1484 struct ceph_osd_op *op = p;
1485 int len;
1486
1487 len = le32_to_cpu(op->payload_len);
1488 req->r_reply_op_len[i] = len;
1489 dout(" op %d has %d bytes\n", i, len);
1490 payload_len += len;
1491 p += sizeof(*op);
1492 }
9fc6e064
AE
1493 bytes = le32_to_cpu(msg->hdr.data_len);
1494 if (payload_len != bytes) {
1b83bef2 1495 pr_warning("sum of op payload lens %d != data_len %d\n",
9fc6e064 1496 payload_len, bytes);
1b83bef2
SW
1497 goto bad_put;
1498 }
1499
1500 ceph_decode_need(&p, end, 4 + numops * 4, bad);
1501 retry_attempt = ceph_decode_32(&p);
1502 for (i = 0; i < numops; i++)
1503 req->r_reply_op_result[i] = ceph_decode_32(&p);
f24e9980 1504
f24e9980 1505 if (!req->r_got_reply) {
f24e9980 1506
1b83bef2 1507 req->r_result = result;
f24e9980
SW
1508 dout("handle_reply result %d bytes %d\n", req->r_result,
1509 bytes);
1510 if (req->r_result == 0)
1511 req->r_result = bytes;
1512
1513 /* in case this is a write and we need to replay, */
1b83bef2
SW
1514 req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
1515 req->r_reassert_version.version = cpu_to_le64(reassert_version);
f24e9980
SW
1516
1517 req->r_got_reply = 1;
1518 } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
1519 dout("handle_reply tid %llu dup ack\n", tid);
34b43a56 1520 mutex_unlock(&osdc->request_mutex);
f24e9980
SW
1521 goto done;
1522 }
1523
1524 dout("handle_reply tid %llu flags %d\n", tid, flags);
1525
a40c4f10
YS
1526 if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
1527 __register_linger_request(osdc, req);
1528
f24e9980 1529 /* either this is a read, or we got the safe response */
0ceed5db
SW
1530 if (result < 0 ||
1531 (flags & CEPH_OSD_FLAG_ONDISK) ||
f24e9980
SW
1532 ((flags & CEPH_OSD_FLAG_WRITE) == 0))
1533 __unregister_request(osdc, req);
1534
0d5af164
AE
1535 already_completed = req->r_completed;
1536 req->r_completed = 1;
f24e9980 1537 mutex_unlock(&osdc->request_mutex);
0d5af164
AE
1538 if (already_completed)
1539 goto done;
f24e9980
SW
1540
1541 if (req->r_callback)
1542 req->r_callback(req, msg);
1543 else
03066f23 1544 complete_all(&req->r_completion);
f24e9980 1545
25845472
SW
1546 if (flags & CEPH_OSD_FLAG_ONDISK)
1547 complete_request(req);
f24e9980
SW
1548
1549done:
a40c4f10 1550 dout("req=%p req->r_linger=%d\n", req, req->r_linger);
f24e9980
SW
1551 ceph_osdc_put_request(req);
1552 return;
1553
1b83bef2
SW
1554bad_put:
1555 ceph_osdc_put_request(req);
8058fd45
AE
1556bad_mutex:
1557 mutex_unlock(&osdc->request_mutex);
f24e9980 1558bad:
1b83bef2
SW
1559 pr_err("corrupt osd_op_reply got %d %d\n",
1560 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
9ec7cab1 1561 ceph_msg_dump(msg);
f24e9980
SW
1562}
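/*
 * Shape of the MOSDOpReply front as decoded above (little-endian):
 *
 *	u32 object_len, object name bytes
 *	pgid (see ceph_decode_pgid())
 *	u64 flags, s32 result, u32 reassert_epoch,
 *	u64 reassert_version, u32 osdmap_epoch
 *	u32 numops, numops x struct ceph_osd_op
 *	u32 retry_attempt, numops x s32 per-op result
 */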
1563
6f6c7006 1564static void reset_changed_osds(struct ceph_osd_client *osdc)
f24e9980 1565{
f24e9980 1566 struct rb_node *p, *n;
f24e9980 1567
6f6c7006
SW
1568 for (p = rb_first(&osdc->osds); p; p = n) {
1569 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
f24e9980 1570
6f6c7006
SW
1571 n = rb_next(p);
1572 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
1573 memcmp(&osd->o_con.peer_addr,
1574 ceph_osd_addr(osdc->osdmap,
1575 osd->o_osd),
1576 sizeof(struct ceph_entity_addr)) != 0)
1577 __reset_osd(osdc, osd);
f24e9980 1578 }
422d2cb8
YS
1579}
1580
1581/*
6f6c7006
SW
1582 * Requeue requests whose mapping to an OSD has changed. If requests map to
1583 * no osd, request a new map.
422d2cb8 1584 *
e6d50f67 1585 * Caller should hold map_sem for read.
422d2cb8 1586 */
38d6453c 1587static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
422d2cb8 1588{
a40c4f10 1589 struct ceph_osd_request *req, *nreq;
6f6c7006
SW
1590 struct rb_node *p;
1591 int needmap = 0;
1592 int err;
422d2cb8 1593
38d6453c 1594 dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
422d2cb8 1595 mutex_lock(&osdc->request_mutex);
6194ea89 1596 for (p = rb_first(&osdc->requests); p; ) {
6f6c7006 1597 req = rb_entry(p, struct ceph_osd_request, r_node);
6194ea89 1598 p = rb_next(p);
ab60b16d
AE
1599
1600 /*
1601 * For linger requests that have not yet been
1602 * registered, move them to the linger list; they'll
1603 * be sent to the osd in the loop below. Unregister
1604 * the request before re-registering it as a linger
1605 * request to ensure the __map_request() below
1606 * will decide it needs to be sent.
1607 */
1608 if (req->r_linger && list_empty(&req->r_linger_item)) {
1609 dout("%p tid %llu restart on osd%d\n",
1610 req, req->r_tid,
1611 req->r_osd ? req->r_osd->o_osd : -1);
1612 __unregister_request(osdc, req);
1613 __register_linger_request(osdc, req);
1614 continue;
1615 }
1616
38d6453c 1617 err = __map_request(osdc, req, force_resend);
6f6c7006
SW
1618 if (err < 0)
1619 continue; /* error */
1620 if (req->r_osd == NULL) {
1621 dout("%p tid %llu maps to no osd\n", req, req->r_tid);
1622 needmap++; /* request a newer map */
1623 } else if (err > 0) {
6194ea89
SW
1624 if (!req->r_linger) {
1625 dout("%p tid %llu requeued on osd%d\n", req,
1626 req->r_tid,
1627 req->r_osd ? req->r_osd->o_osd : -1);
a40c4f10 1628 req->r_flags |= CEPH_OSD_FLAG_RETRY;
6194ea89
SW
1629 }
1630 }
a40c4f10
YS
1631 }
1632
1633 list_for_each_entry_safe(req, nreq, &osdc->req_linger,
1634 r_linger_item) {
1635 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1636
38d6453c 1637 err = __map_request(osdc, req, force_resend);
ab60b16d 1638 dout("__map_request returned %d\n", err);
a40c4f10
YS
1639 if (err == 0)
1640 continue; /* no change and no osd was specified */
1641 if (err < 0)
1642 continue; /* hrm! */
1643 if (req->r_osd == NULL) {
1644 dout("tid %llu maps to no valid osd\n", req->r_tid);
1645 needmap++; /* request a newer map */
1646 continue;
6f6c7006 1647 }
a40c4f10
YS
1648
1649 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
1650 req->r_osd ? req->r_osd->o_osd : -1);
a40c4f10 1651 __register_request(osdc, req);
c89ce05e 1652 __unregister_linger_request(osdc, req);
6f6c7006 1653 }
f24e9980
SW
1654 mutex_unlock(&osdc->request_mutex);
1655
1656 if (needmap) {
1657 dout("%d requests for down osds, need new map\n", needmap);
1658 ceph_monc_request_next_osdmap(&osdc->client->monc);
1659 }
e6d50f67 1660 reset_changed_osds(osdc);
422d2cb8 1661}
6f6c7006
SW
1662
1663
f24e9980
SW
1664/*
1665 * Process updated osd map.
1666 *
1667 * The message contains any number of incremental and full maps, normally
1668 * indicating some sort of topology change in the cluster. Kick requests
1669 * off to different OSDs as needed.
1670 */
1671void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1672{
1673 void *p, *end, *next;
1674 u32 nr_maps, maplen;
1675 u32 epoch;
1676 struct ceph_osdmap *newmap = NULL, *oldmap;
1677 int err;
1678 struct ceph_fsid fsid;
1679
1680 dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
1681 p = msg->front.iov_base;
1682 end = p + msg->front.iov_len;
1683
1684 /* verify fsid */
1685 ceph_decode_need(&p, end, sizeof(fsid), bad);
1686 ceph_decode_copy(&p, &fsid, sizeof(fsid));
0743304d
SW
1687 if (ceph_check_fsid(osdc->client, &fsid) < 0)
1688 return;
f24e9980
SW
1689
1690 down_write(&osdc->map_sem);
1691
1692 /* incremental maps */
1693 ceph_decode_32_safe(&p, end, nr_maps, bad);
1694 dout(" %d inc maps\n", nr_maps);
1695 while (nr_maps > 0) {
1696 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
c89136ea
SW
1697 epoch = ceph_decode_32(&p);
1698 maplen = ceph_decode_32(&p);
f24e9980
SW
1699 ceph_decode_need(&p, end, maplen, bad);
1700 next = p + maplen;
1701 if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
1702 dout("applying incremental map %u len %d\n",
1703 epoch, maplen);
1704 newmap = osdmap_apply_incremental(&p, next,
1705 osdc->osdmap,
15d9882c 1706 &osdc->client->msgr);
f24e9980
SW
1707 if (IS_ERR(newmap)) {
1708 err = PTR_ERR(newmap);
1709 goto bad;
1710 }
30dc6381 1711 BUG_ON(!newmap);
f24e9980
SW
1712 if (newmap != osdc->osdmap) {
1713 ceph_osdmap_destroy(osdc->osdmap);
1714 osdc->osdmap = newmap;
1715 }
38d6453c 1716 kick_requests(osdc, 0);
f24e9980
SW
1717 } else {
1718 dout("ignoring incremental map %u len %d\n",
1719 epoch, maplen);
1720 }
1721 p = next;
1722 nr_maps--;
1723 }
1724 if (newmap)
1725 goto done;
1726
1727 /* full maps */
1728 ceph_decode_32_safe(&p, end, nr_maps, bad);
1729 dout(" %d full maps\n", nr_maps);
1730 while (nr_maps) {
1731 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
c89136ea
SW
1732 epoch = ceph_decode_32(&p);
1733 maplen = ceph_decode_32(&p);
f24e9980
SW
1734 ceph_decode_need(&p, end, maplen, bad);
1735 if (nr_maps > 1) {
1736 dout("skipping non-latest full map %u len %d\n",
1737 epoch, maplen);
1738 } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
1739 dout("skipping full map %u len %d, "
1740 "older than our %u\n", epoch, maplen,
1741 osdc->osdmap->epoch);
1742 } else {
38d6453c
SW
1743 int skipped_map = 0;
1744
f24e9980
SW
1745 dout("taking full map %u len %d\n", epoch, maplen);
1746 newmap = osdmap_decode(&p, p+maplen);
1747 if (IS_ERR(newmap)) {
1748 err = PTR_ERR(newmap);
1749 goto bad;
1750 }
30dc6381 1751 BUG_ON(!newmap);
f24e9980
SW
1752 oldmap = osdc->osdmap;
1753 osdc->osdmap = newmap;
38d6453c
SW
1754 if (oldmap) {
1755 if (oldmap->epoch + 1 < newmap->epoch)
1756 skipped_map = 1;
f24e9980 1757 ceph_osdmap_destroy(oldmap);
38d6453c
SW
1758 }
1759 kick_requests(osdc, skipped_map);
f24e9980
SW
1760 }
1761 p += maplen;
1762 nr_maps--;
1763 }
1764
1765done:
1766 downgrade_write(&osdc->map_sem);
1767 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
cd634fb6
SW
1768
1769 /*
1770 * subscribe to subsequent osdmap updates if full to ensure
1771 * we find out when we are no longer full and stop returning
1772 * ENOSPC.
1773 */
1774 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
1775 ceph_monc_request_next_osdmap(&osdc->client->monc);
1776
f9d25199
AE
1777 mutex_lock(&osdc->request_mutex);
1778 __send_queued(osdc);
1779 mutex_unlock(&osdc->request_mutex);
f24e9980 1780 up_read(&osdc->map_sem);
03066f23 1781 wake_up_all(&osdc->client->auth_wq);
f24e9980
SW
1782 return;
1783
1784bad:
1785 pr_err("osdc handle_map corrupt msg\n");
9ec7cab1 1786 ceph_msg_dump(msg);
f24e9980
SW
1787 up_write(&osdc->map_sem);
1788 return;
1789}
1790
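/*
 * For reference, the OSDMAP message decoded above is laid out as:
 *
 *	fsid
 *	n_inc (u32), then n_inc x { epoch (u32), len (u32), <len bytes> }
 *	n_full (u32), then n_full x { epoch (u32), len (u32), <len bytes> }
 *
 * Maps are ordered oldest to newest; only the newest full map is used.
 */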
a40c4f10
YS
1791/*
1792 * watch/notify callback event infrastructure
1793 *
1794 * These callbacks are used both for watch and notify operations.
1795 */
1796static void __release_event(struct kref *kref)
1797{
1798 struct ceph_osd_event *event =
1799 container_of(kref, struct ceph_osd_event, kref);
1800
1801 dout("__release_event %p\n", event);
1802 kfree(event);
1803}
1804
1805static void get_event(struct ceph_osd_event *event)
1806{
1807 kref_get(&event->kref);
1808}
1809
1810void ceph_osdc_put_event(struct ceph_osd_event *event)
1811{
1812 kref_put(&event->kref, __release_event);
1813}
1814EXPORT_SYMBOL(ceph_osdc_put_event);
1815
1816static void __insert_event(struct ceph_osd_client *osdc,
1817 struct ceph_osd_event *new)
1818{
1819 struct rb_node **p = &osdc->event_tree.rb_node;
1820 struct rb_node *parent = NULL;
1821 struct ceph_osd_event *event = NULL;
1822
1823 while (*p) {
1824 parent = *p;
1825 event = rb_entry(parent, struct ceph_osd_event, node);
1826 if (new->cookie < event->cookie)
1827 p = &(*p)->rb_left;
1828 else if (new->cookie > event->cookie)
1829 p = &(*p)->rb_right;
1830 else
1831 BUG();
1832 }
1833
1834 rb_link_node(&new->node, parent, p);
1835 rb_insert_color(&new->node, &osdc->event_tree);
1836}
1837
1838static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
1839 u64 cookie)
1840{
1841 struct rb_node **p = &osdc->event_tree.rb_node;
1842 struct rb_node *parent = NULL;
1843 struct ceph_osd_event *event = NULL;
1844
1845 while (*p) {
1846 parent = *p;
1847 event = rb_entry(parent, struct ceph_osd_event, node);
1848 if (cookie < event->cookie)
1849 p = &(*p)->rb_left;
1850 else if (cookie > event->cookie)
1851 p = &(*p)->rb_right;
1852 else
1853 return event;
1854 }
1855 return NULL;
1856}
1857
1858static void __remove_event(struct ceph_osd_event *event)
1859{
1860 struct ceph_osd_client *osdc = event->osdc;
1861
1862 if (!RB_EMPTY_NODE(&event->node)) {
1863 dout("__remove_event removed %p\n", event);
1864 rb_erase(&event->node, &osdc->event_tree);
1865 ceph_osdc_put_event(event);
1866 } else {
1867 dout("__remove_event didn't remove %p\n", event);
1868 }
1869}
1870
1871int ceph_osdc_create_event(struct ceph_osd_client *osdc,
1872 void (*event_cb)(u64, u64, u8, void *),
3c663bbd 1873 void *data, struct ceph_osd_event **pevent)
a40c4f10
YS
1874{
1875 struct ceph_osd_event *event;
1876
1877 event = kmalloc(sizeof(*event), GFP_NOIO);
1878 if (!event)
1879 return -ENOMEM;
1880
1881 dout("create_event %p\n", event);
1882 event->cb = event_cb;
3c663bbd 1883 event->one_shot = 0;
a40c4f10
YS
1884 event->data = data;
1885 event->osdc = osdc;
1886 INIT_LIST_HEAD(&event->osd_node);
3ee5234d 1887 RB_CLEAR_NODE(&event->node);
a40c4f10
YS
1888 kref_init(&event->kref); /* one ref for us */
1889 kref_get(&event->kref); /* one ref for the caller */
a40c4f10
YS
1890
1891 spin_lock(&osdc->event_lock);
1892 event->cookie = ++osdc->event_count;
1893 __insert_event(osdc, event);
1894 spin_unlock(&osdc->event_lock);
1895
1896 *pevent = event;
1897 return 0;
1898}
1899EXPORT_SYMBOL(ceph_osdc_create_event);
1900
1901void ceph_osdc_cancel_event(struct ceph_osd_event *event)
1902{
1903 struct ceph_osd_client *osdc = event->osdc;
1904
1905 dout("cancel_event %p\n", event);
1906 spin_lock(&osdc->event_lock);
1907 __remove_event(event);
1908 spin_unlock(&osdc->event_lock);
1909 ceph_osdc_put_event(event); /* caller's */
1910}
1911EXPORT_SYMBOL(ceph_osdc_cancel_event);
1912
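/*
 * Example usage (illustrative sketch, not part of the original file):
 * a watcher creates an event up front and ties it to an OSD watch op
 * via event->cookie.  example_notify_cb and example_setup_watch are
 * hypothetical names.
 */
static void example_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	/* runs from osdc->notify_wq via do_event_work() below */
}

static int example_setup_watch(struct ceph_osd_client *osdc, void *data,
			       struct ceph_osd_event **pevent)
{
	int ret = ceph_osdc_create_event(osdc, example_notify_cb, data, pevent);

	if (ret)
		return ret;
	/*
	 * Embed (*pevent)->cookie in a CEPH_OSD_OP_WATCH op and start it;
	 * on teardown, call ceph_osdc_cancel_event(*pevent).
	 */
	return 0;
}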
1913
1914static void do_event_work(struct work_struct *work)
1915{
1916 struct ceph_osd_event_work *event_work =
1917 container_of(work, struct ceph_osd_event_work, work);
1918 struct ceph_osd_event *event = event_work->event;
1919 u64 ver = event_work->ver;
1920 u64 notify_id = event_work->notify_id;
1921 u8 opcode = event_work->opcode;
1922
1923 dout("do_event_work completing %p\n", event);
1924 event->cb(ver, notify_id, opcode, event->data);
a40c4f10
YS
1925 dout("do_event_work completed %p\n", event);
1926 ceph_osdc_put_event(event);
1927 kfree(event_work);
1928}
1929
1930
1931/*
1932 * Process osd watch notifications
1933 */
3c663bbd
AE
1934static void handle_watch_notify(struct ceph_osd_client *osdc,
1935 struct ceph_msg *msg)
a40c4f10
YS
1936{
1937 void *p, *end;
1938 u8 proto_ver;
1939 u64 cookie, ver, notify_id;
1940 u8 opcode;
1941 struct ceph_osd_event *event;
1942 struct ceph_osd_event_work *event_work;
1943
1944 p = msg->front.iov_base;
1945 end = p + msg->front.iov_len;
1946
1947 ceph_decode_8_safe(&p, end, proto_ver, bad);
1948 ceph_decode_8_safe(&p, end, opcode, bad);
1949 ceph_decode_64_safe(&p, end, cookie, bad);
1950 ceph_decode_64_safe(&p, end, ver, bad);
1951 ceph_decode_64_safe(&p, end, notify_id, bad);
1952
1953 spin_lock(&osdc->event_lock);
1954 event = __find_event(osdc, cookie);
1955 if (event) {
3c663bbd 1956 BUG_ON(event->one_shot);
a40c4f10 1957 get_event(event);
a40c4f10
YS
1958 }
1959 spin_unlock(&osdc->event_lock);
1960 dout("handle_watch_notify cookie %lld ver %lld event %p\n",
1961 cookie, ver, event);
1962 if (event) {
1963 event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
a40c4f10
YS
1964 if (!event_work) {
1965 dout("ERROR: could not allocate event_work\n");
1966 goto done_err;
1967 }
6b0ae409 1968 INIT_WORK(&event_work->work, do_event_work);
a40c4f10
YS
1969 event_work->event = event;
1970 event_work->ver = ver;
1971 event_work->notify_id = notify_id;
1972 event_work->opcode = opcode;
1973 if (!queue_work(osdc->notify_wq, &event_work->work)) {
1974 dout("WARNING: failed to queue notify event work\n");
1975 goto done_err;
1976 }
1977 }
1978
1979 return;
1980
1981done_err:
a40c4f10
YS
1982 ceph_osdc_put_event(event);
1983 return;
1984
1985bad:
1986 pr_err("osdc handle_watch_notify corrupt msg\n");
1987 return;
1988}
1989
e65550fd
AE
1990/*
 1991 * build new request AND message: encode the ops, object name, and
 1992 * snap context set up earlier into the request's message front
1993 */
1994void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
1995 struct ceph_snap_context *snapc, u64 snap_id,
1996 struct timespec *mtime)
1997{
1998 struct ceph_msg *msg = req->r_request;
1999 void *p;
2000 size_t msg_size;
2001 int flags = req->r_flags;
2002 u64 data_len;
2003 unsigned int i;
2004
2005 req->r_snapid = snap_id;
2006 req->r_snapc = ceph_get_snap_context(snapc);
2007
2008 /* encode request */
2009 msg->hdr.version = cpu_to_le16(4);
2010
2011 p = msg->front.iov_base;
2012 ceph_encode_32(&p, 1); /* client_inc is always 1 */
2013 req->r_request_osdmap_epoch = p;
2014 p += 4;
2015 req->r_request_flags = p;
2016 p += 4;
2017 if (req->r_flags & CEPH_OSD_FLAG_WRITE)
2018 ceph_encode_timespec(p, mtime);
2019 p += sizeof(struct ceph_timespec);
2020 req->r_request_reassert_version = p;
2021 p += sizeof(struct ceph_eversion); /* will get filled in */
2022
2023 /* oloc */
2024 ceph_encode_8(&p, 4);
2025 ceph_encode_8(&p, 4);
2026 ceph_encode_32(&p, 8 + 4 + 4);
2027 req->r_request_pool = p;
2028 p += 8;
2029 ceph_encode_32(&p, -1); /* preferred */
2030 ceph_encode_32(&p, 0); /* key len */
2031
2032 ceph_encode_8(&p, 1);
2033 req->r_request_pgid = p;
2034 p += 8 + 4;
2035 ceph_encode_32(&p, -1); /* preferred */
2036
2037 /* oid */
2038 ceph_encode_32(&p, req->r_oid_len);
2039 memcpy(p, req->r_oid, req->r_oid_len);
2040 dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
2041 p += req->r_oid_len;
2042
2043 /* ops--can imply data */
2044 ceph_encode_16(&p, (u16)req->r_num_ops);
2045 data_len = 0;
2046 for (i = 0; i < req->r_num_ops; i++) {
2047 data_len += osd_req_encode_op(req, p, i);
2048 p += sizeof(struct ceph_osd_op);
2049 }
2050
2051 /* snaps */
2052 ceph_encode_64(&p, req->r_snapid);
2053 ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
2054 ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
2055 if (req->r_snapc) {
 2056 for (i = 0; i < req->r_snapc->num_snaps; i++) {
2057 ceph_encode_64(&p, req->r_snapc->snaps[i]);
2058 }
2059 }
2060
2061 req->r_request_attempts = p;
2062 p += 4;
2063
2064 /* data */
2065 if (flags & CEPH_OSD_FLAG_WRITE) {
2066 u16 data_off;
2067
2068 /*
2069 * The header "data_off" is a hint to the receiver
2070 * allowing it to align received data into its
2071 * buffers such that there's no need to re-copy
2072 * it before writing it to disk (direct I/O).
2073 */
2074 data_off = (u16) (off & 0xffff);
2075 req->r_request->hdr.data_off = cpu_to_le16(data_off);
2076 }
2077 req->r_request->hdr.data_len = cpu_to_le32(data_len);
2078
2079 BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
2080 msg_size = p - msg->front.iov_base;
2081 msg->front.iov_len = msg_size;
2082 msg->hdr.front_len = cpu_to_le32(msg_size);
2083
2084 dout("build_request msg_size was %d\n", (int)msg_size);
2085}
2086EXPORT_SYMBOL(ceph_osdc_build_request);
2087
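/*
 * For reference, the message front encoded above (hdr.version 4) is
 * laid out as:
 *
 *	client_inc (u32, always 1) | osdmap_epoch (u32) | flags (u32) |
 *	mtime (ceph_timespec) | reassert_version (ceph_eversion) |
 *	object_locator (v4: pool, preferred, key_len) | pgid | oid |
 *	num_ops (u16) + ops[] | snapid | snap_seq | num_snaps + snaps[] |
 *	retry_attempt (u32)
 */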
70636773
AE
2088/*
2089 * Register request, send initial attempt.
2090 */
2091int ceph_osdc_start_request(struct ceph_osd_client *osdc,
2092 struct ceph_osd_request *req,
2093 bool nofail)
2094{
2095 int rc = 0;
2096
f24e9980
SW
2097 down_read(&osdc->map_sem);
2098 mutex_lock(&osdc->request_mutex);
dc4b870c 2099 __register_request(osdc, req);
92451b49
AE
2100 WARN_ON(req->r_sent);
2101 rc = __map_request(osdc, req, 0);
2102 if (rc < 0) {
2103 if (nofail) {
2104 dout("osdc_start_request failed map, "
2105 " will retry %lld\n", req->r_tid);
2106 rc = 0;
f24e9980 2107 }
92451b49 2108 goto out_unlock;
f24e9980 2109 }
92451b49
AE
2110 if (req->r_osd == NULL) {
2111 dout("send_request %p no up osds in pg\n", req);
2112 ceph_monc_request_next_osdmap(&osdc->client->monc);
2113 } else {
7e2766a1 2114 __send_queued(osdc);
92451b49
AE
2115 }
2116 rc = 0;
234af26f 2117out_unlock:
f24e9980
SW
2118 mutex_unlock(&osdc->request_mutex);
2119 up_read(&osdc->map_sem);
2120 return rc;
2121}
3d14c5d2 2122EXPORT_SYMBOL(ceph_osdc_start_request);
f24e9980
SW
2123
2124/*
2125 * wait for a request to complete
2126 */
2127int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
2128 struct ceph_osd_request *req)
2129{
2130 int rc;
2131
2132 rc = wait_for_completion_interruptible(&req->r_completion);
2133 if (rc < 0) {
2134 mutex_lock(&osdc->request_mutex);
2135 __cancel_request(req);
529cfcc4 2136 __unregister_request(osdc, req);
f24e9980 2137 mutex_unlock(&osdc->request_mutex);
25845472 2138 complete_request(req);
529cfcc4 2139 dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
f24e9980
SW
2140 return rc;
2141 }
2142
2143 dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
2144 return req->r_result;
2145}
3d14c5d2 2146EXPORT_SYMBOL(ceph_osdc_wait_request);
f24e9980
SW
2147
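/*
 * Illustrative sketch (not part of the original file) of the usual
 * synchronous pattern around start/wait; ceph_osdc_readpages() below
 * follows exactly this shape.  example_sync_read is a hypothetical name,
 * and truncate_seq/size of 0 assume no truncation races.
 */
static int example_sync_read(struct ceph_osd_client *osdc,
			     struct ceph_vino vino,
			     struct ceph_file_layout *layout,
			     u64 off, u64 *plen,
			     struct page **pages, int page_align)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, 0, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* attach the destination pages, then encode the message front */
	osd_req_op_extent_osd_data_pages(req, 0, false, pages, *plen,
					 page_align, false, false);
	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	return ret;
}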
2148/*
2149 * sync - wait for all in-flight requests to flush. avoid starvation.
2150 */
2151void ceph_osdc_sync(struct ceph_osd_client *osdc)
2152{
2153 struct ceph_osd_request *req;
2154 u64 last_tid, next_tid = 0;
2155
2156 mutex_lock(&osdc->request_mutex);
2157 last_tid = osdc->last_tid;
2158 while (1) {
2159 req = __lookup_request_ge(osdc, next_tid);
2160 if (!req)
2161 break;
2162 if (req->r_tid > last_tid)
2163 break;
2164
2165 next_tid = req->r_tid + 1;
2166 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
2167 continue;
2168
2169 ceph_osdc_get_request(req);
2170 mutex_unlock(&osdc->request_mutex);
2171 dout("sync waiting on tid %llu (last is %llu)\n",
2172 req->r_tid, last_tid);
2173 wait_for_completion(&req->r_safe_completion);
2174 mutex_lock(&osdc->request_mutex);
2175 ceph_osdc_put_request(req);
2176 }
2177 mutex_unlock(&osdc->request_mutex);
2178 dout("sync done (thru tid %llu)\n", last_tid);
2179}
3d14c5d2 2180EXPORT_SYMBOL(ceph_osdc_sync);
f24e9980
SW
2181
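/*
 * Note: returning from ceph_osdc_sync() above means every prior write
 * has been acked ONDISK by an OSD (r_safe_completion is completed on
 * the ONDISK ack), which is what filesystem sync paths rely on.
 */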
2182/*
2183 * init, shutdown
2184 */
2185int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
2186{
2187 int err;
2188
2189 dout("init\n");
2190 osdc->client = client;
2191 osdc->osdmap = NULL;
2192 init_rwsem(&osdc->map_sem);
2193 init_completion(&osdc->map_waiters);
2194 osdc->last_requested_map = 0;
2195 mutex_init(&osdc->request_mutex);
f24e9980
SW
2196 osdc->last_tid = 0;
2197 osdc->osds = RB_ROOT;
f5a2041b 2198 INIT_LIST_HEAD(&osdc->osd_lru);
f24e9980 2199 osdc->requests = RB_ROOT;
422d2cb8 2200 INIT_LIST_HEAD(&osdc->req_lru);
6f6c7006
SW
2201 INIT_LIST_HEAD(&osdc->req_unsent);
2202 INIT_LIST_HEAD(&osdc->req_notarget);
a40c4f10 2203 INIT_LIST_HEAD(&osdc->req_linger);
f24e9980
SW
2204 osdc->num_requests = 0;
2205 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
f5a2041b 2206 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
a40c4f10
YS
2207 spin_lock_init(&osdc->event_lock);
2208 osdc->event_tree = RB_ROOT;
2209 osdc->event_count = 0;
f5a2041b
YS
2210
2211 schedule_delayed_work(&osdc->osds_timeout_work,
3d14c5d2 2212 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
f24e9980 2213
5f44f142 2214 err = -ENOMEM;
f24e9980
SW
2215 osdc->req_mempool = mempool_create_kmalloc_pool(10,
2216 sizeof(struct ceph_osd_request));
2217 if (!osdc->req_mempool)
5f44f142 2218 goto out;
f24e9980 2219
d50b409f
SW
2220 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
2221 OSD_OP_FRONT_LEN, 10, true,
4f48280e 2222 "osd_op");
f24e9980 2223 if (err < 0)
5f44f142 2224 goto out_mempool;
d50b409f 2225 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4f48280e
SW
2226 OSD_OPREPLY_FRONT_LEN, 10, true,
2227 "osd_op_reply");
c16e7869
SW
2228 if (err < 0)
2229 goto out_msgpool;
a40c4f10
YS
2230
2231 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
 2232 if (!osdc->notify_wq) {
 2233 /* create_singlethread_workqueue() returns NULL, not ERR_PTR */
 2234 err = -ENOMEM;
 2235 goto out_msgpool;
2236 }
f24e9980 2237 return 0;
5f44f142 2238
c16e7869
SW
2239out_msgpool:
2240 ceph_msgpool_destroy(&osdc->msgpool_op);
5f44f142
SW
2241out_mempool:
2242 mempool_destroy(osdc->req_mempool);
2243out:
2244 return err;
f24e9980
SW
2245}
2246
2247void ceph_osdc_stop(struct ceph_osd_client *osdc)
2248{
a40c4f10
YS
2249 flush_workqueue(osdc->notify_wq);
2250 destroy_workqueue(osdc->notify_wq);
f24e9980 2251 cancel_delayed_work_sync(&osdc->timeout_work);
f5a2041b 2252 cancel_delayed_work_sync(&osdc->osds_timeout_work);
f24e9980
SW
2253 if (osdc->osdmap) {
2254 ceph_osdmap_destroy(osdc->osdmap);
2255 osdc->osdmap = NULL;
2256 }
aca420bc 2257 remove_all_osds(osdc);
f24e9980
SW
2258 mempool_destroy(osdc->req_mempool);
2259 ceph_msgpool_destroy(&osdc->msgpool_op);
c16e7869 2260 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
f24e9980
SW
2261}
2262
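/*
 * Illustrative pairing (hypothetical caller, not part of the original
 * file): init and stop bracket the osd_client's lifetime:
 *
 *	err = ceph_osdc_init(&client->osdc, client);
 *	if (err)
 *		goto fail;
 *	...
 *	ceph_osdc_stop(&client->osdc);
 */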
2263/*
2264 * Read some contiguous pages. If we cross a stripe boundary, shorten
2265 * *plen. Return number of bytes read, or error.
2266 */
2267int ceph_osdc_readpages(struct ceph_osd_client *osdc,
2268 struct ceph_vino vino, struct ceph_file_layout *layout,
2269 u64 off, u64 *plen,
2270 u32 truncate_seq, u64 truncate_size,
b7495fc2 2271 struct page **pages, int num_pages, int page_align)
f24e9980
SW
2272{
2273 struct ceph_osd_request *req;
2274 int rc = 0;
2275
2276 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
2277 vino.snap, off, *plen);
79528734 2278 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
f24e9980 2279 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
acead002 2280 NULL, truncate_seq, truncate_size,
153e5167 2281 false);
6816282d
SW
2282 if (IS_ERR(req))
2283 return PTR_ERR(req);
f24e9980
SW
2284
2285 /* it may be a short read due to an object boundary */
0fff87ec 2286
a4ce40a9
AE
2287 osd_req_op_extent_osd_data_pages(req, 0, false,
2288 pages, *plen, page_align, false, false);
f24e9980 2289
e0c59487 2290 dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
43bfe5de 2291 off, *plen, *plen, page_align);
f24e9980 2292
79528734 2293 ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
02ee07d3 2294
f24e9980
SW
2295 rc = ceph_osdc_start_request(osdc, req, false);
2296 if (!rc)
2297 rc = ceph_osdc_wait_request(osdc, req);
2298
2299 ceph_osdc_put_request(req);
2300 dout("readpages result %d\n", rc);
2301 return rc;
2302}
3d14c5d2 2303EXPORT_SYMBOL(ceph_osdc_readpages);
f24e9980
SW
2304
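/*
 * Illustrative caller (not part of the original file): read an extent
 * into a freshly allocated page vector.  example_read_extent is a
 * hypothetical name; truncate_seq/size of 0 assume no truncation races.
 */
static int example_read_extent(struct ceph_osd_client *osdc,
			       struct ceph_vino vino,
			       struct ceph_file_layout *layout,
			       u64 off, u64 len)
{
	int num_pages = calc_pages_for(off, len);
	struct page **pages;
	int ret;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* len may be shortened if the extent crosses an object boundary */
	ret = ceph_osdc_readpages(osdc, vino, layout, off, &len, 0, 0,
				  pages, num_pages, off & ~PAGE_MASK);

	ceph_release_page_vector(pages, num_pages);
	return ret;
}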
2305/*
2306 * do a synchronous write on N pages
2307 */
2308int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
2309 struct ceph_file_layout *layout,
2310 struct ceph_snap_context *snapc,
2311 u64 off, u64 len,
2312 u32 truncate_seq, u64 truncate_size,
2313 struct timespec *mtime,
24808826 2314 struct page **pages, int num_pages)
f24e9980
SW
2315{
2316 struct ceph_osd_request *req;
2317 int rc = 0;
b7495fc2 2318 int page_align = off & ~PAGE_MASK;
f24e9980 2319
acead002 2320 BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */
79528734 2321 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1,
f24e9980 2322 CEPH_OSD_OP_WRITE,
24808826 2323 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
acead002 2324 snapc, truncate_seq, truncate_size,
153e5167 2325 true);
6816282d
SW
2326 if (IS_ERR(req))
2327 return PTR_ERR(req);
f24e9980
SW
2328
2329 /* it may be a short write due to an object boundary */
a4ce40a9 2330 osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align,
43bfe5de
AE
2331 false, false);
2332 dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
f24e9980 2333
79528734 2334 ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);
02ee07d3 2335
87f979d3 2336 rc = ceph_osdc_start_request(osdc, req, true);
f24e9980
SW
2337 if (!rc)
2338 rc = ceph_osdc_wait_request(osdc, req);
2339
2340 ceph_osdc_put_request(req);
2341 if (rc == 0)
2342 rc = len;
2343 dout("writepages result %d\n", rc);
2344 return rc;
2345}
3d14c5d2 2346EXPORT_SYMBOL(ceph_osdc_writepages);
f24e9980
SW
2347
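/*
 * Illustrative caller (not part of the original file): write a page
 * vector with the current time as mtime.  example_write_extent is a
 * hypothetical name; the snap context must come from the caller's
 * snap realm, and truncate_seq/size of 0 assume no truncation races.
 */
static int example_write_extent(struct ceph_osd_client *osdc,
				struct ceph_vino vino,
				struct ceph_file_layout *layout,
				struct ceph_snap_context *snapc,
				u64 off, u64 len,
				struct page **pages, int num_pages)
{
	struct timespec mtime = CURRENT_TIME;

	return ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
				    0, 0, &mtime, pages, num_pages);
}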
2348/*
2349 * handle incoming message
2350 */
2351static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2352{
2353 struct ceph_osd *osd = con->private;
32c895e7 2354 struct ceph_osd_client *osdc;
f24e9980
SW
2355 int type = le16_to_cpu(msg->hdr.type);
2356
2357 if (!osd)
4a32f93d 2358 goto out;
32c895e7 2359 osdc = osd->o_osdc;
f24e9980
SW
2360
2361 switch (type) {
2362 case CEPH_MSG_OSD_MAP:
2363 ceph_osdc_handle_map(osdc, msg);
2364 break;
2365 case CEPH_MSG_OSD_OPREPLY:
350b1c32 2366 handle_reply(osdc, msg, con);
f24e9980 2367 break;
a40c4f10
YS
2368 case CEPH_MSG_WATCH_NOTIFY:
2369 handle_watch_notify(osdc, msg);
2370 break;
f24e9980
SW
2371
2372 default:
2373 pr_err("received unknown message type %d %s\n", type,
2374 ceph_msg_type_name(type));
2375 }
4a32f93d 2376out:
f24e9980
SW
2377 ceph_msg_put(msg);
2378}
2379
5b3a4db3 2380/*
21b667f6
SW
2381 * lookup and return message for incoming reply. set up reply message
2382 * pages.
5b3a4db3
SW
2383 */
2384static struct ceph_msg *get_reply(struct ceph_connection *con,
2450418c
YS
2385 struct ceph_msg_header *hdr,
2386 int *skip)
f24e9980
SW
2387{
2388 struct ceph_osd *osd = con->private;
2389 struct ceph_osd_client *osdc = osd->o_osdc;
2450418c 2390 struct ceph_msg *m;
0547a9b3 2391 struct ceph_osd_request *req;
5b3a4db3
SW
2392 int front = le32_to_cpu(hdr->front_len);
2393 int data_len = le32_to_cpu(hdr->data_len);
0547a9b3 2394 u64 tid;
f24e9980 2395
0547a9b3
YS
2396 tid = le64_to_cpu(hdr->tid);
2397 mutex_lock(&osdc->request_mutex);
2398 req = __lookup_request(osdc, tid);
2399 if (!req) {
2400 *skip = 1;
2401 m = NULL;
756a16a5
SW
2402 dout("get_reply unknown tid %llu from osd%d\n", tid,
2403 osd->o_osd);
0547a9b3
YS
2404 goto out;
2405 }
c16e7869 2406
ace6d3a9 2407 if (req->r_reply->con)
8921d114 2408 dout("%s revoking msg %p from old con %p\n", __func__,
ace6d3a9
AE
2409 req->r_reply, req->r_reply->con);
2410 ceph_msg_revoke_incoming(req->r_reply);
0547a9b3 2411
c16e7869
SW
2412 if (front > req->r_reply->front.iov_len) {
2413 pr_warning("get_reply front %d > preallocated %d\n",
2414 front, (int)req->r_reply->front.iov_len);
b61c2763 2415 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
a79832f2 2416 if (!m)
c16e7869
SW
2417 goto out;
2418 ceph_msg_put(req->r_reply);
2419 req->r_reply = m;
2420 }
2421 m = ceph_msg_get(req->r_reply);
2422
0547a9b3 2423 if (data_len > 0) {
a4ce40a9 2424 struct ceph_osd_data *osd_data;
0fff87ec 2425
a4ce40a9
AE
2426 /*
2427 * XXX This is assuming there is only one op containing
2428 * XXX page data. Probably OK for reads, but this
2429 * XXX ought to be done more generally.
2430 */
2431 osd_data = osd_req_op_extent_osd_data(req, 0, false);
0fff87ec 2432 if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
0fff87ec 2433 if (osd_data->pages &&
e0c59487 2434 unlikely(osd_data->length < data_len)) {
2ac2b7a6 2435
e0c59487
AE
2436 pr_warning("tid %lld reply has %d bytes "
2437 "we had only %llu bytes ready\n",
2438 tid, data_len, osd_data->length);
2ac2b7a6
AE
2439 *skip = 1;
2440 ceph_msg_put(m);
2441 m = NULL;
2442 goto out;
2443 }
2ac2b7a6 2444 }
0547a9b3 2445 }
5b3a4db3 2446 *skip = 0;
c16e7869 2447 dout("get_reply tid %lld %p\n", tid, m);
0547a9b3
YS
2448
2449out:
2450 mutex_unlock(&osdc->request_mutex);
2450418c 2451 return m;
5b3a4db3
SW
2452
2453}
2454
2455static struct ceph_msg *alloc_msg(struct ceph_connection *con,
2456 struct ceph_msg_header *hdr,
2457 int *skip)
2458{
2459 struct ceph_osd *osd = con->private;
2460 int type = le16_to_cpu(hdr->type);
2461 int front = le32_to_cpu(hdr->front_len);
2462
1c20f2d2 2463 *skip = 0;
5b3a4db3
SW
2464 switch (type) {
2465 case CEPH_MSG_OSD_MAP:
a40c4f10 2466 case CEPH_MSG_WATCH_NOTIFY:
b61c2763 2467 return ceph_msg_new(type, front, GFP_NOFS, false);
5b3a4db3
SW
2468 case CEPH_MSG_OSD_OPREPLY:
2469 return get_reply(con, hdr, skip);
2470 default:
2471 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
2472 osd->o_osd);
2473 *skip = 1;
2474 return NULL;
2475 }
f24e9980
SW
2476}
2477
2478/*
2479 * Wrappers to refcount containing ceph_osd struct
2480 */
2481static struct ceph_connection *get_osd_con(struct ceph_connection *con)
2482{
2483 struct ceph_osd *osd = con->private;
2484 if (get_osd(osd))
2485 return con;
2486 return NULL;
2487}
2488
2489static void put_osd_con(struct ceph_connection *con)
2490{
2491 struct ceph_osd *osd = con->private;
2492 put_osd(osd);
2493}
2494
4e7a5dcd
SW
2495/*
2496 * authentication
2497 */
a3530df3
AE
2498/*
2499 * Note: returned pointer is the address of a structure that's
2500 * managed separately. Caller must *not* attempt to free it.
2501 */
2502static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
8f43fb53 2503 int *proto, int force_new)
4e7a5dcd
SW
2504{
2505 struct ceph_osd *o = con->private;
2506 struct ceph_osd_client *osdc = o->o_osdc;
2507 struct ceph_auth_client *ac = osdc->client->monc.auth;
74f1869f 2508 struct ceph_auth_handshake *auth = &o->o_auth;
4e7a5dcd 2509
74f1869f 2510 if (force_new && auth->authorizer) {
27859f97 2511 ceph_auth_destroy_authorizer(ac, auth->authorizer);
74f1869f
AE
2512 auth->authorizer = NULL;
2513 }
27859f97
SW
2514 if (!auth->authorizer) {
2515 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2516 auth);
4e7a5dcd 2517 if (ret)
a3530df3 2518 return ERR_PTR(ret);
27859f97
SW
2519 } else {
2520 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
0bed9b5c
SW
2521 auth);
2522 if (ret)
2523 return ERR_PTR(ret);
4e7a5dcd 2524 }
4e7a5dcd 2525 *proto = ac->protocol;
74f1869f 2526
a3530df3 2527 return auth;
4e7a5dcd
SW
2528}
2529
2530
2531static int verify_authorizer_reply(struct ceph_connection *con, int len)
2532{
2533 struct ceph_osd *o = con->private;
2534 struct ceph_osd_client *osdc = o->o_osdc;
2535 struct ceph_auth_client *ac = osdc->client->monc.auth;
2536
27859f97 2537 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
4e7a5dcd
SW
2538}
2539
9bd2e6f8
SW
2540static int invalidate_authorizer(struct ceph_connection *con)
2541{
2542 struct ceph_osd *o = con->private;
2543 struct ceph_osd_client *osdc = o->o_osdc;
2544 struct ceph_auth_client *ac = osdc->client->monc.auth;
2545
27859f97 2546 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
9bd2e6f8
SW
2547 return ceph_monc_validate_auth(&osdc->client->monc);
2548}
4e7a5dcd 2549
9e32789f 2550static const struct ceph_connection_operations osd_con_ops = {
f24e9980
SW
2551 .get = get_osd_con,
2552 .put = put_osd_con,
2553 .dispatch = dispatch,
4e7a5dcd
SW
2554 .get_authorizer = get_authorizer,
2555 .verify_authorizer_reply = verify_authorizer_reply,
9bd2e6f8 2556 .invalidate_authorizer = invalidate_authorizer,
f24e9980 2557 .alloc_msg = alloc_msg,
81b024e7 2558 .fault = osd_reset,
f24e9980 2559};