libceph: kill ceph_calc_raw_layout()
[linux-2.6-block.git] / net / ceph / osd_client.c
CommitLineData
3d14c5d2 1#include <linux/ceph/ceph_debug.h>
f24e9980 2
3d14c5d2 3#include <linux/module.h>
f24e9980
SW
4#include <linux/err.h>
5#include <linux/highmem.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/slab.h>
9#include <linux/uaccess.h>
68b4476b
YS
10#ifdef CONFIG_BLOCK
11#include <linux/bio.h>
12#endif
f24e9980 13
3d14c5d2
YS
14#include <linux/ceph/libceph.h>
15#include <linux/ceph/osd_client.h>
16#include <linux/ceph/messenger.h>
17#include <linux/ceph/decode.h>
18#include <linux/ceph/auth.h>
19#include <linux/ceph/pagelist.h>
f24e9980 20
c16e7869
SW
21#define OSD_OP_FRONT_LEN 4096
22#define OSD_OPREPLY_FRONT_LEN 512
0d59ab81 23
9e32789f 24static const struct ceph_connection_operations osd_con_ops;
f24e9980 25
f9d25199 26static void __send_queued(struct ceph_osd_client *osdc);
6f6c7006 27static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
a40c4f10
YS
28static void __register_request(struct ceph_osd_client *osdc,
29 struct ceph_osd_request *req);
30static void __unregister_linger_request(struct ceph_osd_client *osdc,
31 struct ceph_osd_request *req);
56e925b6
SW
32static void __send_request(struct ceph_osd_client *osdc,
33 struct ceph_osd_request *req);
f24e9980 34
68b4476b
YS
35static int op_has_extent(int op)
36{
37 return (op == CEPH_OSD_OP_READ ||
38 op == CEPH_OSD_OP_WRITE);
39}
40
f24e9980
SW
41/*
42 * Implement client access to distributed object storage cluster.
43 *
44 * All data objects are stored within a cluster/cloud of OSDs, or
45 * "object storage devices." (Note that Ceph OSDs have _nothing_ to
46 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
47 * remote daemons serving up and coordinating consistent and safe
48 * access to storage.
49 *
50 * Cluster membership and the mapping of data objects onto storage devices
51 * are described by the osd map.
52 *
53 * We keep track of pending OSD requests (read, write), resubmit
54 * requests to different OSDs when the cluster topology/data layout
55 * change, or retry the affected requests when the communications
56 * channel with an OSD is reset.
57 */
58
59/*
60 * calculate the mapping of a file extent onto an object, and fill out the
61 * request accordingly. shorten extent as necessary if it crosses an
62 * object boundary.
63 *
64 * fill osd op in request message.
65 */
static int calc_layout(struct ceph_vino vino,
		       struct ceph_file_layout *layout,
		       u64 off, u64 *plen,
		       struct ceph_osd_request *req,
		       struct ceph_osd_req_op *op)
{
	u64 orig_len = *plen;
	u64 bno = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno,
					  &objoff, &objlen);
	if (r < 0)
		return r;
	if (objlen < orig_len) {
		/* extent crosses an object boundary; shorten to this object */
		*plen = objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	if (op_has_extent(op->op)) {
		u32 osize = le32_to_cpu(layout->fl_object_size);
		op->extent.offset = objoff;
		op->extent.length = objlen;
		/* rebase truncate_size from a file offset to an in-object
		 * offset, clamped to the object size */
		if (op->extent.truncate_size <= off - objoff) {
			op->extent.truncate_size = 0;
		} else {
			op->extent.truncate_size -= off - objoff;
			if (op->extent.truncate_size > osize)
				op->extent.truncate_size = osize;
		}
	}
	req->r_num_pages = calc_pages_for(off, *plen);
	req->r_page_alignment = off & ~PAGE_MASK;
	if (op->op == CEPH_OSD_OP_WRITE)
		op->payload_len = *plen;

	dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
	     bno, objoff, objlen, req->r_num_pages);

	/* object name is "<inode hex>.<block number hex>" */
	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);

	return r;
}
114
f24e9980
SW
115/*
116 * requests
117 */
/*
 * Final cleanup of an osd request, called via kref_put() when the last
 * reference is dropped: releases the request/reply messages, any owned
 * page vector, the snap context and the trail pagelist, then frees the
 * request back to the mempool or the slab.
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
						    struct ceph_osd_request,
						    r_kref);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_con_filling_msg) {
		/* a connection is still filling r_reply with incoming data;
		 * revoke the message and drop the connection's reference */
		dout("%s revoking msg %p from con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_own_pages)
		ceph_release_page_vector(req->r_pages,
					 req->r_num_pages);
	ceph_put_snap_context(req->r_snapc);
	ceph_pagelist_release(&req->r_trail);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
3d14c5d2 145EXPORT_SYMBOL(ceph_osdc_release_request);
68b4476b 146
3499e8a5 147struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
f24e9980 148 struct ceph_snap_context *snapc,
ae7ca4a3 149 unsigned int num_op,
3499e8a5 150 bool use_mempool,
54a54007 151 gfp_t gfp_flags)
f24e9980
SW
152{
153 struct ceph_osd_request *req;
154 struct ceph_msg *msg;
68b4476b 155 size_t msg_size = sizeof(struct ceph_osd_request_head);
3499e8a5 156
68b4476b 157 msg_size += num_op*sizeof(struct ceph_osd_op);
f24e9980
SW
158
159 if (use_mempool) {
3499e8a5 160 req = mempool_alloc(osdc->req_mempool, gfp_flags);
f24e9980
SW
161 memset(req, 0, sizeof(*req));
162 } else {
3499e8a5 163 req = kzalloc(sizeof(*req), gfp_flags);
f24e9980
SW
164 }
165 if (req == NULL)
a79832f2 166 return NULL;
f24e9980 167
f24e9980
SW
168 req->r_osdc = osdc;
169 req->r_mempool = use_mempool;
68b4476b 170
415e49a9 171 kref_init(&req->r_kref);
f24e9980
SW
172 init_completion(&req->r_completion);
173 init_completion(&req->r_safe_completion);
a978fa20 174 RB_CLEAR_NODE(&req->r_node);
f24e9980 175 INIT_LIST_HEAD(&req->r_unsafe_item);
a40c4f10
YS
176 INIT_LIST_HEAD(&req->r_linger_item);
177 INIT_LIST_HEAD(&req->r_linger_osd);
935b639a 178 INIT_LIST_HEAD(&req->r_req_lru_item);
cd43045c
SW
179 INIT_LIST_HEAD(&req->r_osd_item);
180
c16e7869
SW
181 /* create reply message */
182 if (use_mempool)
183 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
184 else
185 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
b61c2763 186 OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
a79832f2 187 if (!msg) {
c16e7869 188 ceph_osdc_put_request(req);
a79832f2 189 return NULL;
c16e7869
SW
190 }
191 req->r_reply = msg;
192
c885837f 193 ceph_pagelist_init(&req->r_trail);
d50b409f 194
c16e7869 195 /* create request message; allow space for oid */
224736d9 196 msg_size += MAX_OBJ_NAME_SIZE;
f24e9980
SW
197 if (snapc)
198 msg_size += sizeof(u64) * snapc->num_snaps;
199 if (use_mempool)
8f3bc053 200 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
f24e9980 201 else
b61c2763 202 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
a79832f2 203 if (!msg) {
f24e9980 204 ceph_osdc_put_request(req);
a79832f2 205 return NULL;
f24e9980 206 }
68b4476b 207
f24e9980 208 memset(msg->front.iov_base, 0, msg->front.iov_len);
3499e8a5
YS
209
210 req->r_request = msg;
3499e8a5
YS
211
212 return req;
213}
3d14c5d2 214EXPORT_SYMBOL(ceph_osdc_alloc_request);
3499e8a5 215
68b4476b
YS
216static void osd_req_encode_op(struct ceph_osd_request *req,
217 struct ceph_osd_op *dst,
218 struct ceph_osd_req_op *src)
219{
220 dst->op = cpu_to_le16(src->op);
221
065a68f9 222 switch (src->op) {
68b4476b
YS
223 case CEPH_OSD_OP_READ:
224 case CEPH_OSD_OP_WRITE:
225 dst->extent.offset =
226 cpu_to_le64(src->extent.offset);
227 dst->extent.length =
228 cpu_to_le64(src->extent.length);
229 dst->extent.truncate_size =
230 cpu_to_le64(src->extent.truncate_size);
231 dst->extent.truncate_seq =
232 cpu_to_le32(src->extent.truncate_seq);
233 break;
234
235 case CEPH_OSD_OP_GETXATTR:
236 case CEPH_OSD_OP_SETXATTR:
237 case CEPH_OSD_OP_CMPXATTR:
68b4476b
YS
238 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
239 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
240 dst->xattr.cmp_op = src->xattr.cmp_op;
241 dst->xattr.cmp_mode = src->xattr.cmp_mode;
c885837f 242 ceph_pagelist_append(&req->r_trail, src->xattr.name,
68b4476b 243 src->xattr.name_len);
c885837f 244 ceph_pagelist_append(&req->r_trail, src->xattr.val,
68b4476b
YS
245 src->xattr.value_len);
246 break;
ae1533b6 247 case CEPH_OSD_OP_CALL:
ae1533b6
YS
248 dst->cls.class_len = src->cls.class_len;
249 dst->cls.method_len = src->cls.method_len;
250 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
251
c885837f 252 ceph_pagelist_append(&req->r_trail, src->cls.class_name,
ae1533b6 253 src->cls.class_len);
c885837f 254 ceph_pagelist_append(&req->r_trail, src->cls.method_name,
ae1533b6 255 src->cls.method_len);
c885837f 256 ceph_pagelist_append(&req->r_trail, src->cls.indata,
ae1533b6
YS
257 src->cls.indata_len);
258 break;
259 case CEPH_OSD_OP_ROLLBACK:
260 dst->snap.snapid = cpu_to_le64(src->snap.snapid);
261 break;
68b4476b
YS
262 case CEPH_OSD_OP_STARTSYNC:
263 break;
a40c4f10
YS
264 case CEPH_OSD_OP_NOTIFY:
265 {
266 __le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
267 __le32 timeout = cpu_to_le32(src->watch.timeout);
268
c885837f 269 ceph_pagelist_append(&req->r_trail,
a40c4f10 270 &prot_ver, sizeof(prot_ver));
c885837f 271 ceph_pagelist_append(&req->r_trail,
a40c4f10
YS
272 &timeout, sizeof(timeout));
273 }
274 case CEPH_OSD_OP_NOTIFY_ACK:
275 case CEPH_OSD_OP_WATCH:
276 dst->watch.cookie = cpu_to_le64(src->watch.cookie);
277 dst->watch.ver = cpu_to_le64(src->watch.ver);
278 dst->watch.flag = src->watch.flag;
279 break;
68b4476b
YS
280 default:
281 pr_err("unrecognized osd opcode %d\n", dst->op);
282 WARN_ON(1);
283 break;
284 }
285 dst->payload_len = cpu_to_le32(src->payload_len);
286}
287
3499e8a5
YS
288/*
289 * build new request AND message
290 *
291 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
			     u64 off, u64 len, unsigned int num_op,
			     struct ceph_osd_req_op *src_ops,
			     struct ceph_snap_context *snapc, u64 snap_id,
			     struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	struct ceph_osd_request_head *head;
	struct ceph_osd_req_op *src_op;
	struct ceph_osd_op *op;
	void *p;
	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
	int flags = req->r_flags;
	u64 data_len = 0;
	int i;

	/* at least one of READ/WRITE must be set */
	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

	head = msg->front.iov_base;
	head->snapid = cpu_to_le64(snap_id);
	/* ops array immediately follows the head; variable-length data
	 * (oid, snap list) follows the ops */
	op = (void *)(head + 1);
	p = (void *)(op + num_op);

	req->r_snapc = ceph_get_snap_context(snapc);

	head->client_inc = cpu_to_le32(1); /* always, for now. */
	head->flags = cpu_to_le32(flags);
	if (flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(&head->mtime, mtime);
	/* num_ops is a u16 on the wire */
	BUG_ON(num_op > (unsigned int) ((u16) -1));
	head->num_ops = cpu_to_le16(num_op);

	/* fill in oid */
	head->object_len = cpu_to_le32(req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	src_op = src_ops;
	while (num_op--)
		osd_req_encode_op(req, op++, src_op++);

	/* trail (xattrs, class call args, ...) counts as message data */
	data_len += req->r_trail.length;

	if (snapc) {
		head->snap_seq = cpu_to_le64(snapc->seq);
		head->num_snaps = cpu_to_le32(snapc->num_snaps);
		for (i = 0; i < snapc->num_snaps; i++) {
			put_unaligned_le64(snapc->snaps[i], p);
			p += sizeof(u64);
		}
	}

	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		req->r_request->hdr.data_len = cpu_to_le32(len + data_len);
	} else if (data_len) {
		req->r_request->hdr.data_off = 0;
		req->r_request->hdr.data_len = cpu_to_le32(data_len);
	}

	req->r_request->page_alignment = req->r_page_alignment;

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	/* shrink the message front to what was actually used */
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);
	return;
}
3d14c5d2 360EXPORT_SYMBOL(ceph_osdc_build_request);
3499e8a5
YS
361
362/*
363 * build new request AND message, calculate layout, and adjust file
364 * extent as needed.
365 *
366 * if the file was recently truncated, we include information about its
367 * old and new size so that the object can be updated appropriately. (we
368 * avoid synchronously deleting truncated objects because it's slow.)
369 *
370 * if @do_sync, include a 'startsync' command so that the osd will flush
371 * data quickly.
372 */
373struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
374 struct ceph_file_layout *layout,
375 struct ceph_vino vino,
376 u64 off, u64 *plen,
377 int opcode, int flags,
378 struct ceph_snap_context *snapc,
379 int do_sync,
380 u32 truncate_seq,
381 u64 truncate_size,
382 struct timespec *mtime,
a3bea47e 383 bool use_mempool,
b7495fc2 384 int page_align)
3499e8a5 385{
ae7ca4a3 386 struct ceph_osd_req_op ops[2];
68b4476b 387 struct ceph_osd_request *req;
ae7ca4a3 388 unsigned int num_op = 1;
6816282d 389 int r;
68b4476b 390
ae7ca4a3
AE
391 memset(&ops, 0, sizeof ops);
392
68b4476b
YS
393 ops[0].op = opcode;
394 ops[0].extent.truncate_seq = truncate_seq;
395 ops[0].extent.truncate_size = truncate_size;
68b4476b
YS
396
397 if (do_sync) {
398 ops[1].op = CEPH_OSD_OP_STARTSYNC;
ae7ca4a3
AE
399 num_op++;
400 }
68b4476b 401
ae7ca4a3
AE
402 req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
403 GFP_NOFS);
4ad12621 404 if (!req)
6816282d 405 return ERR_PTR(-ENOMEM);
d178a9e7 406 req->r_flags = flags;
3499e8a5
YS
407
408 /* calculate max write size */
e75b45cf 409 r = calc_layout(vino, layout, off, plen, req, ops);
6816282d
SW
410 if (r < 0)
411 return ERR_PTR(r);
3499e8a5
YS
412 req->r_file_layout = *layout; /* keep a copy */
413
9bb0ce2b
SW
414 /* in case it differs from natural (file) alignment that
415 calc_layout filled in for us */
416 req->r_num_pages = calc_pages_for(page_align, *plen);
b7495fc2
SW
417 req->r_page_alignment = page_align;
418
ae7ca4a3
AE
419 ceph_osdc_build_request(req, off, *plen, num_op, ops,
420 snapc, vino.snap, mtime);
3499e8a5 421
f24e9980
SW
422 return req;
423}
3d14c5d2 424EXPORT_SYMBOL(ceph_osdc_new_request);
f24e9980
SW
425
426/*
427 * We keep osd requests in an rbtree, sorted by ->r_tid.
428 */
429static void __insert_request(struct ceph_osd_client *osdc,
430 struct ceph_osd_request *new)
431{
432 struct rb_node **p = &osdc->requests.rb_node;
433 struct rb_node *parent = NULL;
434 struct ceph_osd_request *req = NULL;
435
436 while (*p) {
437 parent = *p;
438 req = rb_entry(parent, struct ceph_osd_request, r_node);
439 if (new->r_tid < req->r_tid)
440 p = &(*p)->rb_left;
441 else if (new->r_tid > req->r_tid)
442 p = &(*p)->rb_right;
443 else
444 BUG();
445 }
446
447 rb_link_node(&new->r_node, parent, p);
448 rb_insert_color(&new->r_node, &osdc->requests);
449}
450
451static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
452 u64 tid)
453{
454 struct ceph_osd_request *req;
455 struct rb_node *n = osdc->requests.rb_node;
456
457 while (n) {
458 req = rb_entry(n, struct ceph_osd_request, r_node);
459 if (tid < req->r_tid)
460 n = n->rb_left;
461 else if (tid > req->r_tid)
462 n = n->rb_right;
463 else
464 return req;
465 }
466 return NULL;
467}
468
469static struct ceph_osd_request *
470__lookup_request_ge(struct ceph_osd_client *osdc,
471 u64 tid)
472{
473 struct ceph_osd_request *req;
474 struct rb_node *n = osdc->requests.rb_node;
475
476 while (n) {
477 req = rb_entry(n, struct ceph_osd_request, r_node);
478 if (tid < req->r_tid) {
479 if (!n->rb_left)
480 return req;
481 n = n->rb_left;
482 } else if (tid > req->r_tid) {
483 n = n->rb_right;
484 } else {
485 return req;
486 }
487 }
488 return NULL;
489}
490
6f6c7006
SW
491/*
492 * Resubmit requests pending on the given osd.
493 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	/* reset (or tear down) the osd session first; a nonzero return
	 * means there is nothing to requeue (osd removed, or the
	 * messenger will retry on its own) */
	err = __reset_osd(osdc, osd);
	if (err)
		return;

	/* in-flight requests go back to the unsent queue, marked RETRY
	 * (lingering ones are handled below) */
	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
		dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}

	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add(&req->r_req_lru_item, &osdc->req_unsent);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}
528
f24e9980 529/*
81b024e7 530 * If the osd connection drops, we need to resubmit all requests.
f24e9980
SW
531 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	/* lock order: map_sem (read) before request_mutex */
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/* requeue everything aimed at this osd, then push it back out */
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}
548
549/*
550 * Track open sessions with osds.
551 */
e10006f8 552static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
f24e9980
SW
553{
554 struct ceph_osd *osd;
555
556 osd = kzalloc(sizeof(*osd), GFP_NOFS);
557 if (!osd)
558 return NULL;
559
560 atomic_set(&osd->o_ref, 1);
561 osd->o_osdc = osdc;
e10006f8 562 osd->o_osd = onum;
f407731d 563 RB_CLEAR_NODE(&osd->o_node);
f24e9980 564 INIT_LIST_HEAD(&osd->o_requests);
a40c4f10 565 INIT_LIST_HEAD(&osd->o_linger_requests);
f5a2041b 566 INIT_LIST_HEAD(&osd->o_osd_lru);
f24e9980
SW
567 osd->o_incarnation = 1;
568
b7a9e5dd 569 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
4e7a5dcd 570
422d2cb8 571 INIT_LIST_HEAD(&osd->o_keepalive_item);
f24e9980
SW
572 return osd;
573}
574
575static struct ceph_osd *get_osd(struct ceph_osd *osd)
576{
577 if (atomic_inc_not_zero(&osd->o_ref)) {
578 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
579 atomic_read(&osd->o_ref));
580 return osd;
581 } else {
582 dout("get_osd %p FAIL\n", osd);
583 return NULL;
584 }
585}
586
587static void put_osd(struct ceph_osd *osd)
588{
589 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
590 atomic_read(&osd->o_ref) - 1);
a255651d 591 if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
79494d1b
SW
592 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
593
a255651d 594 if (ac->ops && ac->ops->destroy_authorizer)
6c4a1915 595 ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
f24e9980 596 kfree(osd);
79494d1b 597 }
f24e9980
SW
598}
599
600/*
601 * remove an osd from our map
602 */
f5a2041b 603static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
f24e9980 604{
f5a2041b 605 dout("__remove_osd %p\n", osd);
f24e9980
SW
606 BUG_ON(!list_empty(&osd->o_requests));
607 rb_erase(&osd->o_node, &osdc->osds);
f5a2041b 608 list_del_init(&osd->o_osd_lru);
f24e9980
SW
609 ceph_con_close(&osd->o_con);
610 put_osd(osd);
611}
612
aca420bc
SW
613static void remove_all_osds(struct ceph_osd_client *osdc)
614{
048a9d2d 615 dout("%s %p\n", __func__, osdc);
aca420bc
SW
616 mutex_lock(&osdc->request_mutex);
617 while (!RB_EMPTY_ROOT(&osdc->osds)) {
618 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
619 struct ceph_osd, o_node);
620 __remove_osd(osdc, osd);
621 }
622 mutex_unlock(&osdc->request_mutex);
623}
624
f5a2041b
YS
625static void __move_osd_to_lru(struct ceph_osd_client *osdc,
626 struct ceph_osd *osd)
627{
628 dout("__move_osd_to_lru %p\n", osd);
629 BUG_ON(!list_empty(&osd->o_osd_lru));
630 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
3d14c5d2 631 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
f5a2041b
YS
632}
633
634static void __remove_osd_from_lru(struct ceph_osd *osd)
635{
636 dout("__remove_osd_from_lru %p\n", osd);
637 if (!list_empty(&osd->o_osd_lru))
638 list_del_init(&osd->o_osd_lru);
639}
640
aca420bc 641static void remove_old_osds(struct ceph_osd_client *osdc)
f5a2041b
YS
642{
643 struct ceph_osd *osd, *nosd;
644
645 dout("__remove_old_osds %p\n", osdc);
646 mutex_lock(&osdc->request_mutex);
647 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
aca420bc 648 if (time_before(jiffies, osd->lru_ttl))
f5a2041b
YS
649 break;
650 __remove_osd(osdc, osd);
651 }
652 mutex_unlock(&osdc->request_mutex);
653}
654
f24e9980
SW
655/*
656 * reset osd connect
657 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		/* nothing outstanding: drop the session entirely */
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
			!ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benfit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	/* address changed or connection was in use: reconnect under a
	 * new incarnation */
	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
690
691static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
692{
693 struct rb_node **p = &osdc->osds.rb_node;
694 struct rb_node *parent = NULL;
695 struct ceph_osd *osd = NULL;
696
aca420bc 697 dout("__insert_osd %p osd%d\n", new, new->o_osd);
f24e9980
SW
698 while (*p) {
699 parent = *p;
700 osd = rb_entry(parent, struct ceph_osd, o_node);
701 if (new->o_osd < osd->o_osd)
702 p = &(*p)->rb_left;
703 else if (new->o_osd > osd->o_osd)
704 p = &(*p)->rb_right;
705 else
706 BUG();
707 }
708
709 rb_link_node(&new->o_node, parent, p);
710 rb_insert_color(&new->o_node, &osdc->osds);
711}
712
713static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
714{
715 struct ceph_osd *osd;
716 struct rb_node *n = osdc->osds.rb_node;
717
718 while (n) {
719 osd = rb_entry(n, struct ceph_osd, o_node);
720 if (o < osd->o_osd)
721 n = n->rb_left;
722 else if (o > osd->o_osd)
723 n = n->rb_right;
724 else
725 return osd;
726 }
727 return NULL;
728}
729
422d2cb8
YS
/* (Re)arm the keepalive/timeout work. */
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}
735
/* Cancel the keepalive/timeout work (no requests outstanding). */
static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}
f24e9980
SW
740
741/*
742 * Register request, assign tid. If this is the first request, set up
743 * the timeout event.
744 */
a40c4f10
YS
745static void __register_request(struct ceph_osd_client *osdc,
746 struct ceph_osd_request *req)
f24e9980 747{
f24e9980 748 req->r_tid = ++osdc->last_tid;
6df058c0 749 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
77f38e0e 750 dout("__register_request %p tid %lld\n", req, req->r_tid);
f24e9980
SW
751 __insert_request(osdc, req);
752 ceph_osdc_get_request(req);
753 osdc->num_requests++;
f24e9980 754 if (osdc->num_requests == 1) {
422d2cb8
YS
755 dout(" first request, scheduling timeout\n");
756 __schedule_osd_timeout(osdc);
f24e9980 757 }
a40c4f10
YS
758}
759
/* Register @req (assign tid, etc.) under the request mutex. */
static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	mutex_unlock(&osdc->request_mutex);
}
767
768/*
769 * called under osdc->request_mutex
770 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		/* already unregistered (or never registered) */
		dout("__unregister_request %p tid %lld not registered\n",
		     req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			/* osd is now idle; park it on the lru */
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		/* keep r_osd while the request still lingers on it */
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);	/* drop the registration's ref */

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}
806
807/*
808 * Cancel a previously queued request message
809 */
810static void __cancel_request(struct ceph_osd_request *req)
811{
6bc18876 812 if (req->r_sent && req->r_osd) {
6740a845 813 ceph_msg_revoke(req->r_request);
f24e9980
SW
814 req->r_sent = 0;
815 }
816}
817
a40c4f10
YS
818static void __register_linger_request(struct ceph_osd_client *osdc,
819 struct ceph_osd_request *req)
820{
821 dout("__register_linger_request %p\n", req);
822 list_add_tail(&req->r_linger_item, &osdc->req_linger);
6194ea89
SW
823 if (req->r_osd)
824 list_add_tail(&req->r_linger_osd,
825 &req->r_osd->o_linger_requests);
a40c4f10
YS
826}
827
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			/* osd is now idle; park it on the lru */
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		/* keep r_osd while a regular request is still on it */
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}
845
void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		/* drop the ref taken by ceph_osdc_set_request_linger() */
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
856EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);
857
858void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
859 struct ceph_osd_request *req)
860{
861 if (!req->r_linger) {
862 dout("set_request_linger %p\n", req);
863 req->r_linger = 1;
864 /*
865 * caller is now responsible for calling
866 * unregister_linger_request
867 */
868 ceph_osdc_get_request(req);
869 }
870}
871EXPORT_SYMBOL(ceph_osdc_set_request_linger);
872
f24e9980
SW
873/*
874 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
875 * (as needed), and set the request r_osd appropriately. If there is
25985edc 876 * no up osd, set r_osd to NULL. Move the request to the appropriate list
6f6c7006 877 * (unsent, homeless) or leave on in-flight lru.
f24e9980
SW
878 *
879 * Return 0 if unchanged, 1 if changed, or negative on error.
880 *
881 * Caller should hold map_sem for read and request_mutex.
882 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	/* object -> placement group */
	err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	pgid = reqhead->layout.ol_pgid;
	req->r_pgid = pgid;

	/* pg -> acting osd set; the primary is acting[0] */
	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	/* no change if the target osd, its incarnation, and the acting
	 * set are all unchanged (or there is still no target at all),
	 * unless a resend is being forced */
	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
	     req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	/* detach from the previous osd */
	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		/* first contact with this osd: create a session */
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		/* pg currently has no up osd */
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1; /* osd or pg changed */

out:
	return err;
}
959
960/*
961 * caller should hold map_sem (for read) and request_mutex
962 */
56e925b6
SW
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead;

	dout("send_request %p tid %llu to osd%d flags %d\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags);

	/* refresh the per-send fields in the request head */
	reqhead = req->r_request->front.iov_base;
	reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
	reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
	reqhead->reassert_version = req->r_reassert_version;

	req->r_stamp = jiffies;
	/* in-flight requests live at the tail of req_lru, oldest first */
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}
983
/*
 * Send any requests in the queue (req_unsent).
 *
 * Caller must hold request_mutex; __send_request() moves each request
 * from req_unsent onto req_lru, hence the _safe iteration.
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}
995
/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		/* req_lru is ordered by r_stamp, so stop at first fresh one */
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}
1049
/*
 * Periodic work: close out connections to OSDs that have been idle
 * (on the osd_lru) for too long, then reschedule ourselves.
 */
static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	/* poll at a quarter of the idle TTL so expiry is reasonably timely */
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}
1066
/*
 * Run the "safe" (on-disk) callback, if any, and wake anyone waiting
 * in ceph_osdc_sync()/fsync for this request to be committed.
 */
static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion); /* fsync waiter */
}
1073
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	struct ceph_osd_reply_head *rhead = msg->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int numops, object_len, flags;
	s32 result;

	tid = le64_to_cpu(msg->hdr.tid);
	/* sanity-check the front payload before trusting any field */
	if (msg->front.iov_len < sizeof(*rhead))
		goto bad;
	numops = le32_to_cpu(rhead->num_ops);
	object_len = le32_to_cpu(rhead->object_len);
	result = le32_to_cpu(rhead->result);
	if (msg->front.iov_len != sizeof(*rhead) + object_len +
	    numops * sizeof(struct ceph_osd_op))
		goto bad;
	dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	/* hold a ref across the unlocked callback section below */
	ceph_osdc_get_request(req);
	flags = le32_to_cpu(rhead->flags);

	/*
	 * if this connection filled our message, drop our reference now, to
	 * avoid a (safe but slower) revoke later.
	 */
	if (req->r_con_filling_msg == con && req->r_reply == msg) {
		dout(" dropping con_filling_msg ref %p\n", con);
		req->r_con_filling_msg = NULL;
		con->ops->put(con);
	}

	if (!req->r_got_reply) {
		unsigned int bytes;

		req->r_result = le32_to_cpu(rhead->result);
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		/* success: report the number of bytes transferred */
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version = rhead->reassert_version;

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		/* already saw an ack and this isn't the commit: ignore */
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	/* lingering requests are re-registered once safely on disk */
	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	/* callbacks run without request_mutex held */
	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad:
	pr_err("corrupt osd_op_reply got %d %d expected %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
	       (int)sizeof(*rhead));
	ceph_msg_dump(msg);
}
1170
/*
 * Reset any osd whose up/down state or address changed in the new
 * osdmap, so its connection is re-established against the new peer.
 */
static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	/* grab rb_next before __reset_osd() may unlink the node */
	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}
1187
/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		/* advance now: the request may be unregistered below */
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue; /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++; /* request a newer map */
		} else if (err > 0) {
			/* err > 0: mapping changed, request was requeued */
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	/* re-send lingering (watch) requests whose mapping changed */
	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
	reset_changed_osds(osdc);
}
6f6c7006
SW
1269
1270
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		/* only apply if it is exactly the next epoch */
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	/* if an incremental was applied, skip the full maps entirely */
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				/* if we jumped epochs, force a resend of everything */
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	/* keep map_sem for read while notifying the monitor and sending */
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}
1397
a40c4f10
YS
/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */

/* kref release: free the event once the last reference is dropped */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}
1411
/* take an additional reference on an event */
static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}
1416
/* drop a reference on an event; frees it when the count hits zero */
void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);
1422
1423static void __insert_event(struct ceph_osd_client *osdc,
1424 struct ceph_osd_event *new)
1425{
1426 struct rb_node **p = &osdc->event_tree.rb_node;
1427 struct rb_node *parent = NULL;
1428 struct ceph_osd_event *event = NULL;
1429
1430 while (*p) {
1431 parent = *p;
1432 event = rb_entry(parent, struct ceph_osd_event, node);
1433 if (new->cookie < event->cookie)
1434 p = &(*p)->rb_left;
1435 else if (new->cookie > event->cookie)
1436 p = &(*p)->rb_right;
1437 else
1438 BUG();
1439 }
1440
1441 rb_link_node(&new->node, parent, p);
1442 rb_insert_color(&new->node, &osdc->event_tree);
1443}
1444
1445static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
1446 u64 cookie)
1447{
1448 struct rb_node **p = &osdc->event_tree.rb_node;
1449 struct rb_node *parent = NULL;
1450 struct ceph_osd_event *event = NULL;
1451
1452 while (*p) {
1453 parent = *p;
1454 event = rb_entry(parent, struct ceph_osd_event, node);
1455 if (cookie < event->cookie)
1456 p = &(*p)->rb_left;
1457 else if (cookie > event->cookie)
1458 p = &(*p)->rb_right;
1459 else
1460 return event;
1461 }
1462 return NULL;
1463}
1464
/*
 * Unlink an event from the tree and drop the tree's reference.
 * Safe to call on an already-removed event (RB_EMPTY_NODE check).
 * Caller must hold event_lock.
 */
static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}
1477
/*
 * Allocate and register a watch/notify event.
 *
 * @event_cb:  invoked (from the notify workqueue) with (ver, notify_id,
 *             opcode, data) when a notification for this cookie arrives
 * @one_shot:  if non-zero, the event is auto-removed on first delivery
 * @data:      opaque pointer passed through to @event_cb
 * @pevent:    on success, the new event (caller owns one reference)
 *
 * Returns 0 or -ENOMEM.
 */
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   int one_shot, void *data,
			   struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = one_shot;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */
	init_completion(&event->completion);

	spin_lock(&osdc->event_lock);
	/* cookies are monotonically increasing, hence unique */
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);
1509
/*
 * Unregister an event and drop the caller's reference to it.
 */
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);
1521
1522
/*
 * Workqueue handler: deliver one watch/notify event to its callback,
 * then release the work item's references.
 */
static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	complete(&event->completion);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);	/* ref taken in handle_watch_notify */
	kfree(event_work);
}
1539
1540
1541/*
1542 * Process osd watch notifications
1543 */
1544void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1545{
1546 void *p, *end;
1547 u8 proto_ver;
1548 u64 cookie, ver, notify_id;
1549 u8 opcode;
1550 struct ceph_osd_event *event;
1551 struct ceph_osd_event_work *event_work;
1552
1553 p = msg->front.iov_base;
1554 end = p + msg->front.iov_len;
1555
1556 ceph_decode_8_safe(&p, end, proto_ver, bad);
1557 ceph_decode_8_safe(&p, end, opcode, bad);
1558 ceph_decode_64_safe(&p, end, cookie, bad);
1559 ceph_decode_64_safe(&p, end, ver, bad);
1560 ceph_decode_64_safe(&p, end, notify_id, bad);
1561
1562 spin_lock(&osdc->event_lock);
1563 event = __find_event(osdc, cookie);
1564 if (event) {
1565 get_event(event);
1566 if (event->one_shot)
1567 __remove_event(event);
1568 }
1569 spin_unlock(&osdc->event_lock);
1570 dout("handle_watch_notify cookie %lld ver %lld event %p\n",
1571 cookie, ver, event);
1572 if (event) {
1573 event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
a40c4f10
YS
1574 if (!event_work) {
1575 dout("ERROR: could not allocate event_work\n");
1576 goto done_err;
1577 }
6b0ae409 1578 INIT_WORK(&event_work->work, do_event_work);
a40c4f10
YS
1579 event_work->event = event;
1580 event_work->ver = ver;
1581 event_work->notify_id = notify_id;
1582 event_work->opcode = opcode;
1583 if (!queue_work(osdc->notify_wq, &event_work->work)) {
1584 dout("WARNING: failed to queue notify event work\n");
1585 goto done_err;
1586 }
1587 }
1588
1589 return;
1590
1591done_err:
1592 complete(&event->completion);
1593 ceph_osdc_put_event(event);
1594 return;
1595
1596bad:
1597 pr_err("osdc handle_watch_notify corrupt msg\n");
1598 return;
1599}
1600
/*
 * Wait (interruptibly, up to @timeout seconds) for an event to be
 * delivered, then drop the caller's reference to it.
 *
 * Returns 0 on completion, -ERESTARTSYS if interrupted.
 * NOTE(review): wait_for_completion_interruptible_timeout() returns 0
 * on timeout, which this maps to "success" (err > 0 is the completed
 * case) — confirm callers are happy treating a timeout as 0.
 */
int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout)
{
	int err;

	dout("wait_event %p\n", event);
	err = wait_for_completion_interruptible_timeout(&event->completion,
							timeout * HZ);
	ceph_osdc_put_event(event);
	if (err > 0)
		err = 0;
	dout("wait_event %p returns %d\n", event, err);
	return err;
}
EXPORT_SYMBOL(ceph_osdc_wait_event);
1615
f24e9980
SW
/*
 * Register request, send initial attempt.
 *
 * @nofail: if true, a mapping failure is not reported to the caller;
 *          the request stays registered and will be retried on the
 *          next map change.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	/* attach the data payload to the outgoing message */
	req->r_request->pages = req->r_pages;
	req->r_request->nr_pages = req->r_num_pages;
#ifdef CONFIG_BLOCK
	req->r_request->bio = req->r_bio;
#endif
	req->r_request->trail = &req->r_trail;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/*
	 * a racing kick_requests() may have sent the message for us
	 * while we dropped request_mutex above, so only send now if
	 * the request still hasn't been touched yet.
	 */
	if (req->r_sent == 0) {
		rc = __map_request(osdc, req, 0);
		if (rc < 0) {
			if (nofail) {
				dout("osdc_start_request failed map, "
				     " will retry %lld\n", req->r_tid);
				rc = 0;
			}
			goto out_unlock;
		}
		if (req->r_osd == NULL) {
			/* no usable osd yet; ask for a newer map */
			dout("send_request %p no up osds in pg\n", req);
			ceph_monc_request_next_osdmap(&osdc->client->monc);
		} else {
			__send_request(osdc, req);
		}
		rc = 0;
	}

out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
f24e9980
SW
1666
/*
 * wait for a request to complete
 *
 * Interruptible; on a signal the request is canceled and unregistered
 * and the -ERESTARTSYS (or similar) error is returned.  Otherwise
 * returns the request's result (bytes transferred or negative error).
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		/* wake any fsync waiters on the safe completion too */
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
f24e9980
SW
1690
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 *
 * Snapshot last_tid up front so writes submitted after we start do not
 * keep us waiting forever; only writes are waited on.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		/* hold a ref while we sleep outside request_mutex */
		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
f24e9980
SW
1724
1725/*
1726 * init, shutdown
1727 */
1728int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
1729{
1730 int err;
1731
1732 dout("init\n");
1733 osdc->client = client;
1734 osdc->osdmap = NULL;
1735 init_rwsem(&osdc->map_sem);
1736 init_completion(&osdc->map_waiters);
1737 osdc->last_requested_map = 0;
1738 mutex_init(&osdc->request_mutex);
f24e9980
SW
1739 osdc->last_tid = 0;
1740 osdc->osds = RB_ROOT;
f5a2041b 1741 INIT_LIST_HEAD(&osdc->osd_lru);
f24e9980 1742 osdc->requests = RB_ROOT;
422d2cb8 1743 INIT_LIST_HEAD(&osdc->req_lru);
6f6c7006
SW
1744 INIT_LIST_HEAD(&osdc->req_unsent);
1745 INIT_LIST_HEAD(&osdc->req_notarget);
a40c4f10 1746 INIT_LIST_HEAD(&osdc->req_linger);
f24e9980
SW
1747 osdc->num_requests = 0;
1748 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
f5a2041b 1749 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
a40c4f10
YS
1750 spin_lock_init(&osdc->event_lock);
1751 osdc->event_tree = RB_ROOT;
1752 osdc->event_count = 0;
f5a2041b
YS
1753
1754 schedule_delayed_work(&osdc->osds_timeout_work,
3d14c5d2 1755 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
f24e9980 1756
5f44f142 1757 err = -ENOMEM;
f24e9980
SW
1758 osdc->req_mempool = mempool_create_kmalloc_pool(10,
1759 sizeof(struct ceph_osd_request));
1760 if (!osdc->req_mempool)
5f44f142 1761 goto out;
f24e9980 1762
d50b409f
SW
1763 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
1764 OSD_OP_FRONT_LEN, 10, true,
4f48280e 1765 "osd_op");
f24e9980 1766 if (err < 0)
5f44f142 1767 goto out_mempool;
d50b409f 1768 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4f48280e
SW
1769 OSD_OPREPLY_FRONT_LEN, 10, true,
1770 "osd_op_reply");
c16e7869
SW
1771 if (err < 0)
1772 goto out_msgpool;
a40c4f10
YS
1773
1774 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
1775 if (IS_ERR(osdc->notify_wq)) {
1776 err = PTR_ERR(osdc->notify_wq);
1777 osdc->notify_wq = NULL;
1778 goto out_msgpool;
1779 }
f24e9980 1780 return 0;
5f44f142 1781
c16e7869
SW
1782out_msgpool:
1783 ceph_msgpool_destroy(&osdc->msgpool_op);
5f44f142
SW
1784out_mempool:
1785 mempool_destroy(osdc->req_mempool);
1786out:
1787 return err;
f24e9980
SW
1788}
1789
/*
 * Tear down an osd client: drain the notify workqueue, stop timers,
 * drop the osdmap and all osd sessions, and free the pools.
 */
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	/* make sure queued notify callbacks have finished before freeing */
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
1805
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, truncate_seq, truncate_size, NULL,
				    false, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	req->r_pages = pages;

	dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
	     off, *plen, req->r_num_pages, page_align);

	/* synchronous: submit and wait */
	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
f24e9980
SW
1843
1844/*
1845 * do a synchronous write on N pages
1846 */
1847int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
1848 struct ceph_file_layout *layout,
1849 struct ceph_snap_context *snapc,
1850 u64 off, u64 len,
1851 u32 truncate_seq, u64 truncate_size,
1852 struct timespec *mtime,
24808826 1853 struct page **pages, int num_pages)
f24e9980
SW
1854{
1855 struct ceph_osd_request *req;
1856 int rc = 0;
b7495fc2 1857 int page_align = off & ~PAGE_MASK;
f24e9980
SW
1858
1859 BUG_ON(vino.snap != CEPH_NOSNAP);
1860 req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
1861 CEPH_OSD_OP_WRITE,
24808826 1862 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
fbf8685f 1863 snapc, 0,
f24e9980 1864 truncate_seq, truncate_size, mtime,
a3bea47e 1865 true, page_align);
6816282d
SW
1866 if (IS_ERR(req))
1867 return PTR_ERR(req);
f24e9980
SW
1868
1869 /* it may be a short write due to an object boundary */
1870 req->r_pages = pages;
f24e9980
SW
1871 dout("writepages %llu~%llu (%d pages)\n", off, len,
1872 req->r_num_pages);
1873
87f979d3 1874 rc = ceph_osdc_start_request(osdc, req, true);
f24e9980
SW
1875 if (!rc)
1876 rc = ceph_osdc_wait_request(osdc, req);
1877
1878 ceph_osdc_put_request(req);
1879 if (rc == 0)
1880 rc = len;
1881 dout("writepages result %d\n", rc);
1882 return rc;
1883}
3d14c5d2 1884EXPORT_SYMBOL(ceph_osdc_writepages);
f24e9980
SW
1885
/*
 * handle incoming message
 *
 * Messenger dispatch entry point: route a received message to the
 * appropriate handler.  Consumes the message reference in all cases.
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	/* connection may already have been torn down */
	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
1917
5b3a4db3 1918/*
21b667f6
SW
1919 * lookup and return message for incoming reply. set up reply message
1920 * pages.
5b3a4db3
SW
1921 */
1922static struct ceph_msg *get_reply(struct ceph_connection *con,
2450418c
YS
1923 struct ceph_msg_header *hdr,
1924 int *skip)
f24e9980
SW
1925{
1926 struct ceph_osd *osd = con->private;
1927 struct ceph_osd_client *osdc = osd->o_osdc;
2450418c 1928 struct ceph_msg *m;
0547a9b3 1929 struct ceph_osd_request *req;
5b3a4db3
SW
1930 int front = le32_to_cpu(hdr->front_len);
1931 int data_len = le32_to_cpu(hdr->data_len);
0547a9b3 1932 u64 tid;
f24e9980 1933
0547a9b3
YS
1934 tid = le64_to_cpu(hdr->tid);
1935 mutex_lock(&osdc->request_mutex);
1936 req = __lookup_request(osdc, tid);
1937 if (!req) {
1938 *skip = 1;
1939 m = NULL;
756a16a5
SW
1940 dout("get_reply unknown tid %llu from osd%d\n", tid,
1941 osd->o_osd);
0547a9b3
YS
1942 goto out;
1943 }
c16e7869
SW
1944
1945 if (req->r_con_filling_msg) {
8921d114 1946 dout("%s revoking msg %p from old con %p\n", __func__,
c16e7869 1947 req->r_reply, req->r_con_filling_msg);
8921d114 1948 ceph_msg_revoke_incoming(req->r_reply);
0d47766f 1949 req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
6f46cb29 1950 req->r_con_filling_msg = NULL;
0547a9b3
YS
1951 }
1952
c16e7869
SW
1953 if (front > req->r_reply->front.iov_len) {
1954 pr_warning("get_reply front %d > preallocated %d\n",
1955 front, (int)req->r_reply->front.iov_len);
b61c2763 1956 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
a79832f2 1957 if (!m)
c16e7869
SW
1958 goto out;
1959 ceph_msg_put(req->r_reply);
1960 req->r_reply = m;
1961 }
1962 m = ceph_msg_get(req->r_reply);
1963
0547a9b3 1964 if (data_len > 0) {
b7495fc2 1965 int want = calc_pages_for(req->r_page_alignment, data_len);
21b667f6 1966
9cbb1d72 1967 if (req->r_pages && unlikely(req->r_num_pages < want)) {
9bb0ce2b
SW
1968 pr_warning("tid %lld reply has %d bytes %d pages, we"
1969 " had only %d pages ready\n", tid, data_len,
1970 want, req->r_num_pages);
0547a9b3
YS
1971 *skip = 1;
1972 ceph_msg_put(m);
a79832f2 1973 m = NULL;
21b667f6 1974 goto out;
0547a9b3 1975 }
21b667f6
SW
1976 m->pages = req->r_pages;
1977 m->nr_pages = req->r_num_pages;
c5c6b19d 1978 m->page_alignment = req->r_page_alignment;
68b4476b
YS
1979#ifdef CONFIG_BLOCK
1980 m->bio = req->r_bio;
1981#endif
0547a9b3 1982 }
5b3a4db3 1983 *skip = 0;
0d47766f 1984 req->r_con_filling_msg = con->ops->get(con);
c16e7869 1985 dout("get_reply tid %lld %p\n", tid, m);
0547a9b3
YS
1986
1987out:
1988 mutex_unlock(&osdc->request_mutex);
2450418c 1989 return m;
5b3a4db3
SW
1990
1991}
1992
1993static struct ceph_msg *alloc_msg(struct ceph_connection *con,
1994 struct ceph_msg_header *hdr,
1995 int *skip)
1996{
1997 struct ceph_osd *osd = con->private;
1998 int type = le16_to_cpu(hdr->type);
1999 int front = le32_to_cpu(hdr->front_len);
2000
1c20f2d2 2001 *skip = 0;
5b3a4db3
SW
2002 switch (type) {
2003 case CEPH_MSG_OSD_MAP:
a40c4f10 2004 case CEPH_MSG_WATCH_NOTIFY:
b61c2763 2005 return ceph_msg_new(type, front, GFP_NOFS, false);
5b3a4db3
SW
2006 case CEPH_MSG_OSD_OPREPLY:
2007 return get_reply(con, hdr, skip);
2008 default:
2009 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
2010 osd->o_osd);
2011 *skip = 1;
2012 return NULL;
2013 }
f24e9980
SW
2014}
2015
2016/*
2017 * Wrappers to refcount containing ceph_osd struct
2018 */
2019static struct ceph_connection *get_osd_con(struct ceph_connection *con)
2020{
2021 struct ceph_osd *osd = con->private;
2022 if (get_osd(osd))
2023 return con;
2024 return NULL;
2025}
2026
2027static void put_osd_con(struct ceph_connection *con)
2028{
2029 struct ceph_osd *osd = con->private;
2030 put_osd(osd);
2031}
2032
4e7a5dcd
SW
2033/*
2034 * authentication
2035 */
a3530df3
AE
2036/*
2037 * Note: returned pointer is the address of a structure that's
2038 * managed separately. Caller must *not* attempt to free it.
2039 */
2040static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
8f43fb53 2041 int *proto, int force_new)
4e7a5dcd
SW
2042{
2043 struct ceph_osd *o = con->private;
2044 struct ceph_osd_client *osdc = o->o_osdc;
2045 struct ceph_auth_client *ac = osdc->client->monc.auth;
74f1869f 2046 struct ceph_auth_handshake *auth = &o->o_auth;
4e7a5dcd 2047
74f1869f 2048 if (force_new && auth->authorizer) {
a255651d
AE
2049 if (ac->ops && ac->ops->destroy_authorizer)
2050 ac->ops->destroy_authorizer(ac, auth->authorizer);
74f1869f
AE
2051 auth->authorizer = NULL;
2052 }
a255651d 2053 if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
a3530df3
AE
2054 int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
2055 auth);
4e7a5dcd 2056 if (ret)
a3530df3 2057 return ERR_PTR(ret);
4e7a5dcd 2058 }
4e7a5dcd 2059 *proto = ac->protocol;
74f1869f 2060
a3530df3 2061 return auth;
4e7a5dcd
SW
2062}
2063
2064
2065static int verify_authorizer_reply(struct ceph_connection *con, int len)
2066{
2067 struct ceph_osd *o = con->private;
2068 struct ceph_osd_client *osdc = o->o_osdc;
2069 struct ceph_auth_client *ac = osdc->client->monc.auth;
2070
a255651d
AE
2071 /*
2072 * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
2073 * XXX which do we do: succeed or fail?
2074 */
6c4a1915 2075 return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
4e7a5dcd
SW
2076}
2077
9bd2e6f8
SW
2078static int invalidate_authorizer(struct ceph_connection *con)
2079{
2080 struct ceph_osd *o = con->private;
2081 struct ceph_osd_client *osdc = o->o_osdc;
2082 struct ceph_auth_client *ac = osdc->client->monc.auth;
2083
a255651d 2084 if (ac->ops && ac->ops->invalidate_authorizer)
9bd2e6f8
SW
2085 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
2086
2087 return ceph_monc_validate_auth(&osdc->client->monc);
2088}
4e7a5dcd 2089
/* Messenger callback table for connections to OSDs. */
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};