libceph: update ceph_osd_op_name()
[linux-2.6-block.git] / net / ceph / osd_client.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);

static int op_has_extent(int op)
{
	return (op == CEPH_OSD_OP_READ ||
		op == CEPH_OSD_OP_WRITE);
}

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

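/*
 * For orientation, a typical synchronous caller drives this client
 * roughly as follows (an illustrative sketch only -- error handling
 * trimmed; it mirrors the real in-tree user ceph_osdc_readpages()
 * near the bottom of this file):
 *
 *	struct ceph_osd_request *req;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, truncate_seq, truncate_size,
 *				    NULL, false, page_align);
 *	req->r_pages = pages;
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */
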
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_vino vino,
		       struct ceph_file_layout *layout,
		       u64 off, u64 *plen,
		       struct ceph_osd_request *req,
		       struct ceph_osd_req_op *op)
{
	u64 orig_len = *plen;
	u64 bno = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno,
					  &objoff, &objlen);
	if (r < 0)
		return r;
	if (objlen < orig_len) {
		*plen = objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	if (op_has_extent(op->op)) {
		u32 osize = le32_to_cpu(layout->fl_object_size);
		op->extent.offset = objoff;
		op->extent.length = objlen;
		if (op->extent.truncate_size <= off - objoff) {
			op->extent.truncate_size = 0;
		} else {
			op->extent.truncate_size -= off - objoff;
			if (op->extent.truncate_size > osize)
				op->extent.truncate_size = osize;
		}
	}
	req->r_num_pages = calc_pages_for(off, *plen);
	req->r_page_alignment = off & ~PAGE_MASK;
	if (op->op == CEPH_OSD_OP_WRITE)
		op->payload_len = *plen;

	dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
	     bno, objoff, objlen, req->r_num_pages);

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);

	return r;
}
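
/*
 * Worked example (illustrative only, assuming the default layout of
 * 4 MB objects with no fancy striping): a read of off=6 MB, *plen=4 MB
 * maps to object bno=1 (the second 4 MB chunk of the file) at
 * objoff=2 MB.  Only 2 MB remain in that object, so objlen=2 MB and
 * *plen is shortened to 2 MB; the caller is expected to issue another
 * request for the remainder.  The object is named "<ino hex>.00000001".
 */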

/*
 * requests
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
						    struct ceph_osd_request,
						    r_kref);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_con_filling_msg) {
		dout("%s revoking msg %p from con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_own_pages)
		ceph_release_page_vector(req->r_pages,
					 req->r_num_pages);
	ceph_put_snap_context(req->r_snapc);
	ceph_pagelist_release(&req->r_trail);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);
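
/*
 * Lifetime note (a sketch of the refcounting contract, not new API):
 * requests are reference counted via r_kref, and this function only
 * ever runs through kref_put().  Callers pair the helpers from
 * osd_client.h:
 *
 *	ceph_osdc_get_request(req);
 *	...hand req to another context...
 *	ceph_osdc_put_request(req);	release_request runs on last put
 */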

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_op,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	size_t msg_size = sizeof(struct ceph_osd_request_head);

	msg_size += num_op*sizeof(struct ceph_osd_op);

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	ceph_pagelist_init(&req->r_trail);

	/* create request message; allow space for oid */
	msg_size += MAX_OBJ_NAME_SIZE;
	if (snapc)
		msg_size += sizeof(u64) * snapc->num_snaps;
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static void osd_req_encode_op(struct ceph_osd_request *req,
			      struct ceph_osd_op *dst,
			      struct ceph_osd_req_op *src)
{
	dst->op = cpu_to_le16(src->op);

	switch (src->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		dst->extent.offset =
			cpu_to_le64(src->extent.offset);
		dst->extent.length =
			cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;

	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		ceph_pagelist_append(&req->r_trail, src->xattr.name,
				     src->xattr.name_len);
		ceph_pagelist_append(&req->r_trail, src->xattr.val,
				     src->xattr.value_len);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);

		ceph_pagelist_append(&req->r_trail, src->cls.class_name,
				     src->cls.class_len);
		ceph_pagelist_append(&req->r_trail, src->cls.method_name,
				     src->cls.method_len);
		ceph_pagelist_append(&req->r_trail, src->cls.indata,
				     src->cls.indata_len);
		break;
	case CEPH_OSD_OP_ROLLBACK:
		dst->snap.snapid = cpu_to_le64(src->snap.snapid);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY:
	{
		__le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
		__le32 timeout = cpu_to_le32(src->watch.timeout);

		ceph_pagelist_append(&req->r_trail,
				     &prot_ver, sizeof(prot_ver));
		ceph_pagelist_append(&req->r_trail,
				     &timeout, sizeof(timeout));
	}
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unrecognized osd opcode %d\n", dst->op);
		WARN_ON(1);
		break;
	}
	dst->payload_len = cpu_to_le32(src->payload_len);
}
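
/*
 * Example: callers fill a struct ceph_osd_req_op on the stack and let
 * this function wire-encode it.  A class method call might look like
 * this (an illustrative sketch; the class/method names and payload are
 * made up):
 *
 *	struct ceph_osd_req_op op = { .op = CEPH_OSD_OP_CALL };
 *
 *	op.cls.class_name = "lock";
 *	op.cls.class_len = 4;
 *	op.cls.method_name = "lock";
 *	op.cls.method_len = 4;
 *	op.cls.indata = payload;
 *	op.cls.indata_len = payload_len;
 *	op.payload_len = op.cls.class_len + op.cls.method_len +
 *			 op.cls.indata_len;
 *
 * The fixed-size fields land in the request's op array; the
 * variable-size data (names, payload) is appended to r_trail.
 */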

/*
 * build new request AND message
 *
 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
			     u64 off, u64 len, unsigned int num_op,
			     struct ceph_osd_req_op *src_ops,
			     struct ceph_snap_context *snapc, u64 snap_id,
			     struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	struct ceph_osd_request_head *head;
	struct ceph_osd_req_op *src_op;
	struct ceph_osd_op *op;
	void *p;
	size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
	int flags = req->r_flags;
	u64 data_len = 0;
	int i;

	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

	head = msg->front.iov_base;
	head->snapid = cpu_to_le64(snap_id);
	op = (void *)(head + 1);
	p = (void *)(op + num_op);

	req->r_snapc = ceph_get_snap_context(snapc);

	head->client_inc = cpu_to_le32(1); /* always, for now. */
	head->flags = cpu_to_le32(flags);
	if (flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(&head->mtime, mtime);
	BUG_ON(num_op > (unsigned int) ((u16) -1));
	head->num_ops = cpu_to_le16(num_op);

	/* fill in oid */
	head->object_len = cpu_to_le32(req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	src_op = src_ops;
	while (num_op--)
		osd_req_encode_op(req, op++, src_op++);

	data_len += req->r_trail.length;

	if (snapc) {
		head->snap_seq = cpu_to_le64(snapc->seq);
		head->num_snaps = cpu_to_le32(snapc->num_snaps);
		for (i = 0; i < snapc->num_snaps; i++) {
			put_unaligned_le64(snapc->snaps[i], p);
			p += sizeof(u64);
		}
	}

	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		req->r_request->hdr.data_len = cpu_to_le32(len + data_len);
	} else if (data_len) {
		req->r_request->hdr.data_off = 0;
		req->r_request->hdr.data_len = cpu_to_le32(data_len);
	}

	req->r_request->page_alignment = req->r_page_alignment;

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);
	return;
}
EXPORT_SYMBOL(ceph_osdc_build_request);

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       int do_sync,
					       u32 truncate_seq,
					       u64 truncate_size,
					       struct timespec *mtime,
					       bool use_mempool,
					       int page_align)
{
	struct ceph_osd_req_op ops[2];
	struct ceph_osd_request *req;
	unsigned int num_op = 1;
	int r;

	memset(&ops, 0, sizeof ops);

	ops[0].op = opcode;
	ops[0].extent.truncate_seq = truncate_seq;
	ops[0].extent.truncate_size = truncate_size;

	if (do_sync) {
		ops[1].op = CEPH_OSD_OP_STARTSYNC;
		num_op++;
	}

	req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
				      GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(vino, layout, off, plen, req, ops);
	if (r < 0)
		return ERR_PTR(r);
	req->r_file_layout = *layout;  /* keep a copy */

	/* in case it differs from natural (file) alignment that
	   calc_layout filled in for us */
	req->r_num_pages = calc_pages_for(page_align, *plen);
	req->r_page_alignment = page_align;

	ceph_osdc_build_request(req, off, *plen, num_op, ops,
				snapc, vino.snap, mtime);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}
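
/*
 * __lookup_request_ge() returns the registered request with the lowest
 * tid >= @tid, which lets a caller walk the tree in tid order without
 * holding request_mutex across the whole walk.  Sketch of the pattern
 * (this is what ceph_osdc_sync() below does):
 *
 *	u64 next_tid = 0;
 *
 *	while ((req = __lookup_request_ge(osdc, next_tid))) {
 *		next_tid = req->r_tid + 1;
 *		...drop the mutex, wait on req, retake the mutex...
 *	}
 */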

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;

	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
		dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}

	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add(&req->r_req_lru_item, &osdc->req_unsent);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		if (ac->ops && ac->ops->destroy_authorizer)
			ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	mutex_unlock(&osdc->request_mutex);
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
			req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}

static void __register_linger_request(struct ceph_osd_client *osdc,
				    struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
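
/*
 * Usage sketch (illustrative; this mirrors how the rbd watch path uses
 * the linger machinery): a caller that wants a request re-sent across
 * mapping changes -- e.g. a CEPH_OSD_OP_WATCH registration -- marks it
 * lingering before starting it, and unregisters it when done:
 *
 *	ceph_osdc_set_request_linger(osdc, req);
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	...
 *	ceph_osdc_unregister_linger_request(osdc, req);
 */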

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	pgid = reqhead->layout.ol_pgid;
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
	     req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead;

	dout("send_request %p tid %llu to osd%d flags %d\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags);

	reqhead = req->r_request->front.iov_base;
	reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
	reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
	reqhead->reassert_version = req->r_reassert_version;

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

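/*
 * Note on the two completions (a behavioural summary of the code here,
 * not a new mechanism): r_completion fires when a reply is handled and
 * is what ceph_osdc_wait_request() blocks on; r_safe_completion only
 * fires once a write is CEPH_OSD_FLAG_ONDISK, which is what
 * ceph_osdc_sync() and fsync-style callers wait for, e.g.:
 *
 *	wait_for_completion(&req->r_safe_completion);
 */
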
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	struct ceph_osd_reply_head *rhead = msg->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int numops, object_len, flags;
	s32 result;

	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*rhead))
		goto bad;
	numops = le32_to_cpu(rhead->num_ops);
	object_len = le32_to_cpu(rhead->object_len);
	result = le32_to_cpu(rhead->result);
	if (msg->front.iov_len != sizeof(*rhead) + object_len +
	    numops * sizeof(struct ceph_osd_op))
		goto bad;
	dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);
	flags = le32_to_cpu(rhead->flags);

	/*
	 * if this connection filled our message, drop our reference now, to
	 * avoid a (safe but slower) revoke later.
	 */
	if (req->r_con_filling_msg == con && req->r_reply == msg) {
		dout(" dropping con_filling_msg ref %p\n", con);
		req->r_con_filling_msg = NULL;
		con->ops->put(con);
	}

	if (!req->r_got_reply) {
		unsigned int bytes;

		req->r_result = le32_to_cpu(rhead->result);
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version = rhead->reassert_version;

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad:
	pr_err("corrupt osd_op_reply got %d %d expected %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
	       (int)sizeof(*rhead));
	ceph_msg_dump(msg);
}

static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;  /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;  /* request a newer map */
		} else if (err > 0) {
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
	reset_changed_osds(osdc);
}


/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}

/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}

static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}

void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);

static void __insert_event(struct ceph_osd_client *osdc,
			   struct ceph_osd_event *new)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (new->cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (new->cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &osdc->event_tree);
}

static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
					   u64 cookie)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			return event;
	}
	return NULL;
}

static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}

int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   void *data, struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = 0;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);

void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);
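
/*
 * Usage sketch (illustrative; my_watch_cb and data are placeholders):
 * a watcher creates an event, stashes the returned cookie in the
 * CEPH_OSD_OP_WATCH op, and cancels the event on teardown:
 *
 *	struct ceph_osd_event *event;
 *
 *	ret = ceph_osdc_create_event(osdc, my_watch_cb, data, &event);
 *	...put event->cookie into the CEPH_OSD_OP_WATCH request...
 *	ceph_osdc_cancel_event(event);
 *
 * Incoming CEPH_MSG_WATCH_NOTIFY messages are matched back to the
 * event by that cookie (see handle_watch_notify() below), and the
 * callback runs from the notify workqueue.
 */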


static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}


/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}

/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	req->r_request->pages = req->r_pages;
	req->r_request->nr_pages = req->r_num_pages;
#ifdef CONFIG_BLOCK
	req->r_request->bio = req->r_bio;
#endif
	req->r_request->trail = &req->r_trail;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/*
	 * a racing kick_requests() may have sent the message for us
	 * while we dropped request_mutex above, so only send now if
	 * the request hasn't been touched yet.
	 */
	if (req->r_sent == 0) {
		rc = __map_request(osdc, req, 0);
		if (rc < 0) {
			if (nofail) {
				dout("osdc_start_request failed map, "
				     " will retry %lld\n", req->r_tid);
				rc = 0;
			}
			goto out_unlock;
		}
		if (req->r_osd == NULL) {
			dout("send_request %p no up osds in pg\n", req);
			ceph_monc_request_next_osdmap(&osdc->client->monc);
		} else {
			__send_request(osdc, req);
		}
		rc = 0;
	}

out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
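
/*
 * Note for synchronous callers (a restatement of the code above, not
 * new behaviour): the wait is interruptible, so a signal yields
 * -ERESTARTSYS after the request has been cancelled and unregistered;
 * on success the return value is r_result, which for a successful read
 * is the number of bytes transferred (see handle_reply()).
 */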
f24e9980
SW
1671
1672/*
1673 * sync - wait for all in-flight requests to flush. avoid starvation.
1674 */
1675void ceph_osdc_sync(struct ceph_osd_client *osdc)
1676{
1677 struct ceph_osd_request *req;
1678 u64 last_tid, next_tid = 0;
1679
1680 mutex_lock(&osdc->request_mutex);
1681 last_tid = osdc->last_tid;
1682 while (1) {
1683 req = __lookup_request_ge(osdc, next_tid);
1684 if (!req)
1685 break;
1686 if (req->r_tid > last_tid)
1687 break;
1688
1689 next_tid = req->r_tid + 1;
1690 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
1691 continue;
1692
1693 ceph_osdc_get_request(req);
1694 mutex_unlock(&osdc->request_mutex);
1695 dout("sync waiting on tid %llu (last is %llu)\n",
1696 req->r_tid, last_tid);
1697 wait_for_completion(&req->r_safe_completion);
1698 mutex_lock(&osdc->request_mutex);
1699 ceph_osdc_put_request(req);
1700 }
1701 mutex_unlock(&osdc->request_mutex);
1702 dout("sync done (thru tid %llu)\n", last_tid);
1703}
3d14c5d2 1704EXPORT_SYMBOL(ceph_osdc_sync);

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	osdc->requests = RB_ROOT;
	INIT_LIST_HEAD(&osdc->req_lru);
	INIT_LIST_HEAD(&osdc->req_unsent);
	INIT_LIST_HEAD(&osdc->req_notarget);
	INIT_LIST_HEAD(&osdc->req_linger);
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
	spin_lock_init(&osdc->event_lock);
	osdc->event_tree = RB_ROOT;
	osdc->event_count = 0;

	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				OSD_OP_FRONT_LEN, 10, true,
				"osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				OSD_OPREPLY_FRONT_LEN, 10, true,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	/* create_singlethread_workqueue() returns NULL on failure */
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq) {
		err = -ENOMEM;
		goto out_msgpool_reply;
	}
	return 0;

out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
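
/*
 * Illustrative sketch (not part of the original file): init and stop
 * bracket the osd_client's lifetime.  "example_client_setup" is a
 * hypothetical caller; the real pairing lives in ceph_create_client()
 * and ceph_destroy_client().
 */
static int example_client_setup(struct ceph_client *client)
{
	int err;

	err = ceph_osdc_init(&client->osdc, client);
	if (err)
		return err;
	/* ... issue requests against the cluster ... */
	ceph_osdc_stop(&client->osdc);
	return 0;
}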

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, truncate_seq, truncate_size, NULL,
				    false, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	req->r_pages = pages;

	dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
	     off, *plen, req->r_num_pages, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
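
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * read into a freshly allocated page vector.  "example_read" is a
 * hypothetical helper; ceph_alloc_page_vector() and
 * ceph_release_page_vector() are the libceph page-vector helpers.
 */
static int example_read(struct ceph_osd_client *osdc, struct ceph_vino vino,
			struct ceph_file_layout *layout, u64 off, u64 len)
{
	int num_pages = calc_pages_for(off, len);
	struct page **pages;
	int rc;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
				 0, 0,	/* no truncate_{seq,size} */
				 pages, num_pages, off & ~PAGE_MASK);
	ceph_release_page_vector(pages, num_pages);
	return rc;	/* bytes read, or negative error */
}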

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, 0,
				    truncate_seq, truncate_size, mtime,
				    true, page_align);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	req->r_pages = pages;
	dout("writepages %llu~%llu (%d pages)\n", off, len,
	     req->r_num_pages);

	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
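
/*
 * Illustrative sketch (not part of the original file): mirroring the
 * read example above, a synchronous write of a single page.
 * "example_write_page" is hypothetical; off is assumed page-aligned
 * so one page suffices, and vino.snap must be CEPH_NOSNAP per the
 * BUG_ON above.
 */
static int example_write_page(struct ceph_osd_client *osdc,
			      struct ceph_vino vino,
			      struct ceph_file_layout *layout,
			      struct ceph_snap_context *snapc,
			      u64 off, struct page *page)
{
	struct page *pages[1] = { page };

	return ceph_osdc_writepages(osdc, vino, layout, snapc, off,
				    PAGE_SIZE,
				    0, 0, NULL,	/* no truncate, no mtime */
				    pages, 1);
}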

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * lookup and return message for incoming reply.  set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	if (req->r_con_filling_msg) {
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}

	if (front > req->r_reply->front.iov_len) {
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		int want = calc_pages_for(req->r_page_alignment, data_len);

		if (req->r_pages && unlikely(req->r_num_pages < want)) {
			pr_warning("tid %lld reply has %d bytes %d pages, we"
				   " had only %d pages ready\n", tid, data_len,
				   want, req->r_num_pages);
			*skip = 1;
			ceph_msg_put(m);
			m = NULL;
			goto out;
		}
		m->pages = req->r_pages;
		m->nr_pages = req->r_num_pages;
		m->page_alignment = req->r_page_alignment;
#ifdef CONFIG_BLOCK
		m->bio = req->r_bio;
#endif
	}
	*skip = 0;
	req->r_con_filling_msg = con->ops->get(con);
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}
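
/*
 * alloc_msg() below implements the messenger's alloc_msg hook: it must
 * hand back a ceph_msg to receive the incoming message into, or set
 * *skip and return NULL to have the messenger read and discard the
 * message instead of delivering it.
 */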
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		if (ac->ops && ac->ops->destroy_authorizer)
			ac->ops->destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
		int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						     auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}


static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	/*
	 * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
	 * XXX which do we do: succeed or fail?
	 */
	return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	if (ac->ops && ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);

	return ceph_monc_validate_auth(&osdc->client->monc);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};
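
/*
 * Illustrative sketch (not part of the original file): the ops table
 * above is wired to each OSD session's connection when the ceph_osd
 * is created, roughly as create_osd() does earlier in this file.
 * "example_init_osd_con" is a hypothetical helper.
 */
static void example_init_osd_con(struct ceph_osd_client *osdc,
				 struct ceph_osd *osd)
{
	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
}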