libceph: distinguish page and bio requests
net/ceph/osd_client.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OP_FRONT_LEN	4096
#define OSD_OPREPLY_FRONT_LEN	512

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req);
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req);

static int op_has_extent(int op)
{
	return (op == CEPH_OSD_OP_READ ||
		op == CEPH_OSD_OP_WRITE);
}

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
		       struct ceph_osd_req_op *op, u64 *bno)
{
	u64 orig_len = *plen;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, bno,
					  &objoff, &objlen);
	if (r < 0)
		return r;
	if (objlen < orig_len) {
		*plen = objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	if (op_has_extent(op->op)) {
		u32 osize = le32_to_cpu(layout->fl_object_size);
		op->extent.offset = objoff;
		op->extent.length = objlen;
		if (op->extent.truncate_size <= off - objoff) {
			op->extent.truncate_size = 0;
		} else {
			op->extent.truncate_size -= off - objoff;
			if (op->extent.truncate_size > osize)
				op->extent.truncate_size = osize;
		}
	}
	if (op->op == CEPH_OSD_OP_WRITE)
		op->payload_len = *plen;

	dout("calc_layout bno=%llx %llu~%llu\n", *bno, objoff, objlen);

	return 0;
}
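
/*
 * Worked example (hypothetical numbers): with a plain 4 MB object
 * size and no striping, a file extent off=6 MB, *plen=4 MB crosses an
 * object boundary, so the mapping above yields bno=1, objoff=2 MB,
 * objlen=2 MB; *plen is shortened to 2 MB and the caller must issue a
 * follow-up request for the remainder.
 */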

/*
 * requests
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
						    struct ceph_osd_request,
						    r_kref);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_con_filling_msg) {
		dout("%s revoking msg %p from con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES &&
	    req->r_data.own_pages)
		ceph_release_page_vector(req->r_data.pages,
					 req->r_data.num_pages);
	ceph_put_snap_context(req->r_snapc);
	ceph_pagelist_release(&req->r_trail);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);

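/*
 * Allocate a request plus its reply message.  msg_size below sizes
 * the message front for the fields that ceph_osdc_build_request()
 * later encodes into it.
 */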
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
						 struct ceph_snap_context *snapc,
						 unsigned int num_ops,
						 bool use_mempool,
						 gfp_t gfp_flags)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	size_t msg_size;

	msg_size = 4 + 4 + 8 + 8 + 4 + 8;
	msg_size += 2 + 4 + 8 + 4 + 4;	/* oloc */
	msg_size += 1 + 8 + 4 + 4;	/* pg_t */
	msg_size += 4 + MAX_OBJ_NAME_SIZE;
	msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8;	/* snapid */
	msg_size += 8;	/* snap_seq */
	msg_size += 8 * (snapc ? snapc->num_snaps : 0);	/* snaps */
	msg_size += 4;	/* retry_attempt */

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	req->r_data.type = CEPH_OSD_DATA_TYPE_NONE;
	ceph_pagelist_init(&req->r_trail);

	/* create request message; allow space for oid */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;

	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

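/*
 * Encode a single in-memory op into its on-the-wire form.  For
 * CEPH_OSD_OP_CALL the class name, method name, and input data are
 * appended to the request's trail pagelist rather than the message
 * front.  Recognized-but-unsupported opcodes fall through to a
 * warning, as does anything unknown.
 */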
static void osd_req_encode_op(struct ceph_osd_request *req,
			      struct ceph_osd_op *dst,
			      struct ceph_osd_req_op *src)
{
	dst->op = cpu_to_le16(src->op);

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		dst->extent.offset =
			cpu_to_le64(src->extent.offset);
		dst->extent.length =
			cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);

		ceph_pagelist_append(&req->r_trail, src->cls.class_name,
				     src->cls.class_len);
		ceph_pagelist_append(&req->r_trail, src->cls.method_name,
				     src->cls.method_len);
		ceph_pagelist_append(&req->r_trail, src->cls.indata,
				     src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unrecognized osd opcode %d\n", src->op);
		WARN_ON(1);
		break;
	case CEPH_OSD_OP_MAPEXT:
	case CEPH_OSD_OP_MASKTRUNC:
	case CEPH_OSD_OP_SPARSE_READ:
	case CEPH_OSD_OP_NOTIFY:
	case CEPH_OSD_OP_ASSERT_VER:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_APPEND:
	case CEPH_OSD_OP_SETTRUNC:
	case CEPH_OSD_OP_TRIMTRUNC:
	case CEPH_OSD_OP_TMAPUP:
	case CEPH_OSD_OP_TMAPPUT:
	case CEPH_OSD_OP_TMAPGET:
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_ROLLBACK:
	case CEPH_OSD_OP_OMAPGETKEYS:
	case CEPH_OSD_OP_OMAPGETVALS:
	case CEPH_OSD_OP_OMAPGETHEADER:
	case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
	case CEPH_OSD_OP_MODE_RD:
	case CEPH_OSD_OP_OMAPSETVALS:
	case CEPH_OSD_OP_OMAPSETHEADER:
	case CEPH_OSD_OP_OMAPCLEAR:
	case CEPH_OSD_OP_OMAPRMKEYS:
	case CEPH_OSD_OP_OMAP_CMP:
	case CEPH_OSD_OP_CLONERANGE:
	case CEPH_OSD_OP_ASSERT_SRC_VERSION:
	case CEPH_OSD_OP_SRC_CMPXATTR:
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_GETXATTRS:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_SETXATTRS:
	case CEPH_OSD_OP_RESETXATTRS:
	case CEPH_OSD_OP_RMXATTR:
	case CEPH_OSD_OP_PULL:
	case CEPH_OSD_OP_PUSH:
	case CEPH_OSD_OP_BALANCEREADS:
	case CEPH_OSD_OP_UNBALANCEREADS:
	case CEPH_OSD_OP_SCRUB:
	case CEPH_OSD_OP_SCRUB_RESERVE:
	case CEPH_OSD_OP_SCRUB_UNRESERVE:
	case CEPH_OSD_OP_SCRUB_STOP:
	case CEPH_OSD_OP_SCRUB_MAP:
	case CEPH_OSD_OP_WRLOCK:
	case CEPH_OSD_OP_WRUNLOCK:
	case CEPH_OSD_OP_RDLOCK:
	case CEPH_OSD_OP_RDUNLOCK:
	case CEPH_OSD_OP_UPLOCK:
	case CEPH_OSD_OP_DNLOCK:
	case CEPH_OSD_OP_PGLS:
	case CEPH_OSD_OP_PGLS_FILTER:
		pr_err("unsupported osd opcode %s\n",
		       ceph_osd_op_name(src->op));
		WARN_ON(1);
		break;
	}
	dst->payload_len = cpu_to_le32(src->payload_len);
}

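/*
 * The request front (header version 4) is laid out below in this
 * order: client_inc, osdmap_epoch, flags, mtime, reassert_version,
 * object locator (pool, preferred, key length), pgid, oid, the op
 * vector, snapid, snap_seq, snaps[], and a trailing retry_attempt.
 * Pointers to the mutable fields are stashed in r_request_* so that
 * __send_request() can refresh them on every (re)send without
 * re-encoding the whole front.
 */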
/*
 * build new request AND message
 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
			     u64 off, u64 len, unsigned int num_ops,
			     struct ceph_osd_req_op *src_ops,
			     struct ceph_snap_context *snapc, u64 snap_id,
			     struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	struct ceph_osd_req_op *src_op;
	void *p;
	size_t msg_size;
	int flags = req->r_flags;
	u64 data_len;
	int i;

	req->r_num_ops = num_ops;
	req->r_snapid = snap_id;
	req->r_snapc = ceph_get_snap_context(snapc);

	/* encode request */
	msg->hdr.version = cpu_to_le16(4);

	p = msg->front.iov_base;
	ceph_encode_32(&p, 1);   /* client_inc is always 1 */
	req->r_request_osdmap_epoch = p;
	p += 4;
	req->r_request_flags = p;
	p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	p += sizeof(struct ceph_timespec);
	req->r_request_reassert_version = p;
	p += sizeof(struct ceph_eversion); /* will get filled in */

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	req->r_request_pool = p;
	p += 8;
	ceph_encode_32(&p, -1);  /* preferred */
	ceph_encode_32(&p, 0);   /* key len */

	ceph_encode_8(&p, 1);
	req->r_request_pgid = p;
	p += 8 + 4;
	ceph_encode_32(&p, -1);  /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* ops */
	ceph_encode_16(&p, num_ops);
	src_op = src_ops;
	req->r_request_ops = p;
	for (i = 0; i < num_ops; i++, src_op++) {
		osd_req_encode_op(req, p, src_op);
		p += sizeof(struct ceph_osd_op);
	}

	/* snaps */
	ceph_encode_64(&p, req->r_snapid);
	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
	if (req->r_snapc) {
		for (i = 0; i < snapc->num_snaps; i++) {
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
		}
	}

	req->r_request_attempts = p;
	p += 4;

	data_len = req->r_trail.length;
	if (flags & CEPH_OSD_FLAG_WRITE) {
		req->r_request->hdr.data_off = cpu_to_le16(off);
		data_len += len;
	}
	req->r_request->hdr.data_len = cpu_to_le32(data_len);

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);

	dout("build_request msg_size was %d num_ops %d\n", (int)msg_size,
	     num_ops);
	return;
}
EXPORT_SYMBOL(ceph_osdc_build_request);

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       int do_sync,
					       u32 truncate_seq,
					       u64 truncate_size,
					       struct timespec *mtime,
					       bool use_mempool)
{
	struct ceph_osd_req_op ops[2];
	struct ceph_osd_request *req;
	unsigned int num_op = 1;
	u64 bno = 0;
	int r;

	memset(&ops, 0, sizeof ops);

	ops[0].op = opcode;
	ops[0].extent.truncate_seq = truncate_seq;
	ops[0].extent.truncate_size = truncate_size;

	if (do_sync) {
		ops[1].op = CEPH_OSD_OP_STARTSYNC;
		num_op++;
	}

	req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool,
				      GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, ops, &bno);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	req->r_file_layout = *layout;  /* keep a copy */

	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);

	ceph_osdc_build_request(req, off, *plen, num_op, ops,
				snapc, vino.snap, mtime);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);

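/*
 * Minimal usage sketch (hypothetical caller, mirroring how libceph
 * readers drive this API): build the request, start it, wait for the
 * result, drop the reference.
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, truncate_seq, truncate_size,
 *				    NULL, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	rc = ceph_osdc_start_request(osdc, req, false);
 *	if (!rc)
 *		rc = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */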
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}

static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
						 u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

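/*
 * Find the request with the smallest tid >= @tid, or NULL.  Lets
 * ceph_osdc_sync() walk in-flight requests in tid order.
 */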
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;

	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
		dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}

	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add(&req->r_req_lru_item, &osdc->req_unsent);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}

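/*
 * o_ref covers the osd tree's reference plus any in-flight users;
 * get_osd() refuses to resurrect an osd whose count has already
 * dropped to zero, and the final put_osd() destroys the authorizer
 * before freeing.
 */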
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;

		if (ac->ops && ac->ops->destroy_authorizer)
			ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer);
		kfree(osd);
	}
}

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}

static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
	struct ceph_osd *osd;
	struct rb_node *n = osdc->osds.rb_node;

	while (n) {
		osd = rb_entry(n, struct ceph_osd, o_node);
		if (o < osd->o_osd)
			n = n->rb_left;
		else if (o > osd->o_osd)
			n = n->rb_right;
		else
			return osd;
	}
	return NULL;
}

static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}

static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}

static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	mutex_unlock(&osdc->request_mutex);
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
		     req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}

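/*
 * Linger requests stay registered on osdc->req_linger after they
 * complete so they can be resent whenever the osd mapping changes
 * (the watch machinery relies on this).
 * ceph_osdc_set_request_linger() takes the extra reference that
 * ceph_osdc_unregister_linger_request() eventually drops.
 */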
static void __register_linger_request(struct ceph_osd_client *osdc,
				      struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}

void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate
 * list (unsent, homeless) or leave it on the in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
				ceph_file_layout_pg_pool(req->r_file_layout));
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
		list_move(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;  /* osd or pg changed */

out:
	return err;
}

/*
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}

/*
 * Send any requests in the queue (req_unsent).
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}

/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;

	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}

static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}

static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}

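/*
 * Wire format consumed below: a u8 encoding version (only <= 1 is
 * understood), le64 pool, le32 seed, then a le32 "preferred" osd
 * that is skipped.
 */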
static int __decode_pgid(void **p, void *end, struct ceph_pg *pgid)
{
	__u8 v;

	ceph_decode_need(p, end, 1 + 8 + 4 + 4, bad);
	v = ceph_decode_8(p);
	if (v > 1) {
		pr_warning("do not understand pg encoding %d > 1\n", v);
		return -EINVAL;
	}
	pgid->pool = ceph_decode_64(p);
	pgid->seed = ceph_decode_32(p);
	*p += 4;
	return 0;

bad:
	pr_warning("incomplete pg encoding\n");
	return -EINVAL;
}

/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	void *p, *end;
	struct ceph_osd_request *req;
	u64 tid;
	int object_len;
	int numops, payload_len, flags;
	s32 result;
	s32 retry_attempt;
	struct ceph_pg pg;
	int err;
	u32 reassert_epoch;
	u64 reassert_version;
	u32 osdmap_epoch;
	int already_completed;
	int i;

	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_reply %p tid %llu\n", msg, tid);

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 4, bad);
	object_len = ceph_decode_32(&p);
	ceph_decode_need(&p, end, object_len, bad);
	p += object_len;

	err = __decode_pgid(&p, end, &pg);
	if (err)
		goto bad;

	ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
	flags = ceph_decode_64(&p);
	result = ceph_decode_32(&p);
	reassert_epoch = ceph_decode_32(&p);
	reassert_version = ceph_decode_64(&p);
	osdmap_epoch = ceph_decode_32(&p);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);

	dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
	     req, result);

	ceph_decode_need(&p, end, 4, bad);
	numops = ceph_decode_32(&p);
	if (numops > CEPH_OSD_MAX_OP)
		goto bad_put;
	if (numops != req->r_num_ops)
		goto bad_put;
	payload_len = 0;
	ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
	for (i = 0; i < numops; i++) {
		struct ceph_osd_op *op = p;
		int len;

		len = le32_to_cpu(op->payload_len);
		req->r_reply_op_len[i] = len;
		dout(" op %d has %d bytes\n", i, len);
		payload_len += len;
		p += sizeof(*op);
	}
	if (payload_len != le32_to_cpu(msg->hdr.data_len)) {
		pr_warning("sum of op payload lens %d != data_len %d\n",
			   payload_len, le32_to_cpu(msg->hdr.data_len));
		goto bad_put;
	}

	ceph_decode_need(&p, end, 4 + numops * 4, bad);
	retry_attempt = ceph_decode_32(&p);
	for (i = 0; i < numops; i++)
		req->r_reply_op_result[i] = ceph_decode_32(&p);

	/*
	 * if this connection filled our message, drop our reference now, to
	 * avoid a (safe but slower) revoke later.
	 */
	if (req->r_con_filling_msg == con && req->r_reply == msg) {
		dout(" dropping con_filling_msg ref %p\n", con);
		req->r_con_filling_msg = NULL;
		con->ops->put(con);
	}

	if (!req->r_got_reply) {
		unsigned int bytes;

		req->r_result = result;
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;

		/* in case this is a write and we need to replay, */
		req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
		req->r_reassert_version.version = cpu_to_le64(reassert_version);

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
		__register_linger_request(osdc, req);

	/* either this is a read, or we got the safe response */
	if (result < 0 ||
	    (flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	already_completed = req->r_completed;
	req->r_completed = 1;
	mutex_unlock(&osdc->request_mutex);
	if (already_completed)
		goto done;

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete_all(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK)
		complete_request(req);

done:
	dout("req=%p req->r_linger=%d\n", req, req->r_linger);
	ceph_osdc_put_request(req);
	return;

bad_put:
	ceph_osdc_put_request(req);
bad:
	pr_err("corrupt osd_op_reply got %d %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
	ceph_msg_dump(msg);
}

static void reset_changed_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *p, *n;

	for (p = rb_first(&osdc->osds); p; p = n) {
		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

		n = rb_next(p);
		if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
		    memcmp(&osd->o_con.peer_addr,
			   ceph_osd_addr(osdc->osdmap,
					 osd->o_osd),
			   sizeof(struct ceph_entity_addr)) != 0)
			__reset_osd(osdc, osd);
	}
}

/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;  /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;  /* request a newer map */
		} else if (err > 0) {
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
	reset_changed_osds(osdc);
}


/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}

/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}

static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}

void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);

static void __insert_event(struct ceph_osd_client *osdc,
			   struct ceph_osd_event *new)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (new->cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (new->cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &osdc->event_tree);
}

static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
					   u64 cookie)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			return event;
	}
	return NULL;
}

static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}

int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   void *data, struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = 0;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);

void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event);  /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);

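/*
 * Hypothetical watcher sketch: the event is created first so that its
 * cookie can be carried in the subsequent CEPH_OSD_OP_WATCH op, which
 * is what handle_watch_notify() later uses to find it again.
 *
 *	struct ceph_osd_event *event;
 *	int ret = ceph_osdc_create_event(osdc, my_notify_cb, my_data,
 *					 &event);
 *	if (ret)
 *		return ret;
 *	(put event->cookie in the watch op and send the request)
 *	ceph_osdc_cancel_event(event);	(tear down when done watching)
 */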

static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}


/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}

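/*
 * This is where page-backed and bio-backed requests diverge:
 * ceph_osdc_start_request() below inspects r_data.type and attaches
 * either the page vector (pages/page_count/page_alignment) or, under
 * CONFIG_BLOCK, the bio chain to the outgoing message before it is
 * handed to the messenger.
 */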
f24e9980
SW
1735/*
1736 * Register request, send initial attempt.
1737 */
1738int ceph_osdc_start_request(struct ceph_osd_client *osdc,
1739 struct ceph_osd_request *req,
1740 bool nofail)
1741{
c1ea8823 1742 int rc = 0;
f24e9980 1743
2ac2b7a6
AE
1744 if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES) {
1745 req->r_request->pages = req->r_data.pages;
1746 req->r_request->page_count = req->r_data.num_pages;
1747 req->r_request->page_alignment = req->r_data.alignment;
68b4476b 1748#ifdef CONFIG_BLOCK
2ac2b7a6
AE
1749 } else if (req->r_data.type == CEPH_OSD_DATA_TYPE_BIO) {
1750 req->r_request->bio = req->r_data.bio;
68b4476b 1751#endif
2ac2b7a6
AE
1752 } else {
1753 pr_err("unknown request data type %d\n", req->r_data.type);
1754 }
c885837f 1755 req->r_request->trail = &req->r_trail;
f24e9980
SW
1756
1757 register_request(osdc, req);
1758
1759 down_read(&osdc->map_sem);
1760 mutex_lock(&osdc->request_mutex);
c1ea8823
SW
1761 /*
1762 * a racing kick_requests() may have sent the message for us
1763 * while we dropped request_mutex above, so only send now if
1764 * the request still han't been touched yet.
1765 */
1766 if (req->r_sent == 0) {
38d6453c 1767 rc = __map_request(osdc, req, 0);
9d6fcb08
SW
1768 if (rc < 0) {
1769 if (nofail) {
1770 dout("osdc_start_request failed map, "
1771 " will retry %lld\n", req->r_tid);
1772 rc = 0;
1773 }
234af26f 1774 goto out_unlock;
9d6fcb08 1775 }
6f6c7006
SW
1776 if (req->r_osd == NULL) {
1777 dout("send_request %p no up osds in pg\n", req);
1778 ceph_monc_request_next_osdmap(&osdc->client->monc);
1779 } else {
56e925b6 1780 __send_request(osdc, req);
f24e9980 1781 }
56e925b6 1782 rc = 0;
f24e9980 1783 }
234af26f
DC
1784
1785out_unlock:
f24e9980
SW
1786 mutex_unlock(&osdc->request_mutex);
1787 up_read(&osdc->map_sem);
1788 return rc;
1789}
3d14c5d2 1790EXPORT_SYMBOL(ceph_osdc_start_request);
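
/*
 * Illustrative sketch (mirrors ceph_osdc_readpages() below): before a
 * request is started, the caller describes its buffer in req->r_data.
 * For the page-vector case that looks like this; pages, off and len are
 * hypothetical caller state.
 */
#if 0
	req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES;
	req->r_data.pages = pages;
	req->r_data.num_pages = calc_pages_for(off & ~PAGE_MASK, len);
	req->r_data.alignment = off & ~PAGE_MASK;
	rc = ceph_osdc_start_request(osdc, req, false);
#endif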

/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
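
/*
 * Illustrative sketch of the synchronous start/wait pattern used by the
 * readpages/writepages helpers below; req is assumed to have been built
 * with ceph_osdc_new_request().
 */
#if 0
	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);	/* returns r_result */
	ceph_osdc_put_request(req);			/* drop our reference */
#endif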

/*
 * sync - wait for all in-flight requests to flush. avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

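		/*
		 * Hold a reference across the unlock below so the request
		 * cannot be freed while we sleep on r_safe_completion;
		 * next_tid lets the scan resume without revisiting tids
		 * we have already waited on.
		 */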
		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	osdc->requests = RB_ROOT;
	INIT_LIST_HEAD(&osdc->req_lru);
	INIT_LIST_HEAD(&osdc->req_unsent);
	INIT_LIST_HEAD(&osdc->req_notarget);
	INIT_LIST_HEAD(&osdc->req_linger);
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
	spin_lock_init(&osdc->event_lock);
	osdc->event_tree = RB_ROOT;
	osdc->event_count = 0;

	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				OSD_OP_FRONT_LEN, 10, true,
				"osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				OSD_OPREPLY_FRONT_LEN, 10, true,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (IS_ERR(osdc->notify_wq)) {
		err = PTR_ERR(osdc->notify_wq);
		osdc->notify_wq = NULL;
		goto out_msgpool;
	}
	return 0;

out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
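
/*
 * Illustrative init/teardown pairing (a sketch; the osd_client is
 * assumed to be embedded in a hypothetical client structure):
 */
#if 0
	err = ceph_osdc_init(&client->osdc, client);
	if (err)
		return err;
	/* ... submit requests ... */
	ceph_osdc_stop(&client->osdc);
#endif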

/*
 * Read some contiguous pages. If we cross a stripe boundary, shorten
 * *plen. Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, truncate_seq, truncate_size, NULL,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES;
	req->r_data.pages = pages;
	req->r_data.num_pages = calc_pages_for(page_align, *plen);
	req->r_data.alignment = page_align;

	dout("readpages final extent is %llu~%llu (%d pages align %d)\n",
	     off, *plen, req->r_data.num_pages, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
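
/*
 * Illustrative caller sketch: read len bytes at off into a freshly
 * allocated page vector.  ceph_alloc_page_vector() and
 * ceph_release_page_vector() are the libceph page-vector helpers; the
 * surrounding variables are hypothetical.
 */
#if 0
	int page_align = off & ~PAGE_MASK;
	int num_pages = calc_pages_for(page_align, len);
	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
				 truncate_seq, truncate_size,
				 pages, num_pages, page_align);
	ceph_release_page_vector(pages, num_pages);
#endif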

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, 0,
				    truncate_seq, truncate_size, mtime,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES;
	req->r_data.pages = pages;
	req->r_data.num_pages = calc_pages_for(page_align, len);
	req->r_data.alignment = page_align;
	dout("writepages %llu~%llu (%d pages)\n", off, len, req->r_data.num_pages);

	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
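
/*
 * Illustrative sketch: page_align is derived from the file offset, so a
 * write need not start on a page boundary; e.g. one page of data at
 * offset 512 (all variables hypothetical):
 */
#if 0
	rc = ceph_osdc_writepages(osdc, vino, layout, snapc,
				  512, PAGE_SIZE,	/* off, len */
				  truncate_seq, truncate_size, &mtime,
				  pages, calc_pages_for(512, PAGE_SIZE));
	/* rc == PAGE_SIZE on success */
#endif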

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int type = le16_to_cpu(msg->hdr.type);

	if (!osd)
		goto out;
	osdc = osd->o_osdc;

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osdc, msg, con);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * lookup and return message for incoming reply. set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	if (req->r_con_filling_msg) {
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_con_filling_msg);
		ceph_msg_revoke_incoming(req->r_reply);
		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
		req->r_con_filling_msg = NULL;
	}

	if (front > req->r_reply->front.iov_len) {
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES) {
			int want;

			want = calc_pages_for(req->r_data.alignment, data_len);
			if (req->r_data.pages &&
			    unlikely(req->r_data.num_pages < want)) {

				pr_warning("tid %lld reply has %d bytes %d "
					   "pages, we had only %d pages ready\n",
					   tid, data_len, want,
					   req->r_data.num_pages);
				*skip = 1;
				ceph_msg_put(m);
				m = NULL;
				goto out;
			}
			m->pages = req->r_data.pages;
			m->page_count = req->r_data.num_pages;
			m->page_alignment = req->r_data.alignment;
#ifdef CONFIG_BLOCK
		} else if (req->r_data.type == CEPH_OSD_DATA_TYPE_BIO) {
			m->bio = req->r_data.bio;
#endif
		}
	}
	*skip = 0;
	req->r_con_filling_msg = con->ops->get(con);
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);
	int front = le32_to_cpu(hdr->front_len);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_WATCH_NOTIFY:
		return ceph_msg_new(type, front, GFP_NOFS, false);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
			osd->o_osd);
		*skip = 1;
		return NULL;
	}
}
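
/*
 * For reference, the alloc_msg contract with the messenger (a summary):
 * return a ceph_msg to receive the incoming message into; return NULL
 * with *skip = 1 to have the message dropped; return NULL with
 * *skip = 0 to report an allocation failure.
 */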

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately. Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		if (ac->ops && ac->ops->destroy_authorizer)
			ac->ops->destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) {
		int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						     auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}


static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	/*
	 * XXX If ac->ops or ac->ops->verify_authorizer_reply is null,
	 * XXX which do we do: succeed or fail?
	 */
	return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	if (ac->ops && ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);

	return ceph_monc_validate_auth(&osdc->client->monc);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.fault = osd_reset,
};