ceph: kill ceph alloc_page_vec()
[linux-2.6-block.git] / net / ceph / osd_client.c
CommitLineData
3d14c5d2 1#include <linux/ceph/ceph_debug.h>
f24e9980 2
3d14c5d2 3#include <linux/module.h>
f24e9980
SW
4#include <linux/err.h>
5#include <linux/highmem.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/slab.h>
9#include <linux/uaccess.h>
68b4476b
YS
10#ifdef CONFIG_BLOCK
11#include <linux/bio.h>
12#endif
f24e9980 13
3d14c5d2
YS
14#include <linux/ceph/libceph.h>
15#include <linux/ceph/osd_client.h>
16#include <linux/ceph/messenger.h>
17#include <linux/ceph/decode.h>
18#include <linux/ceph/auth.h>
19#include <linux/ceph/pagelist.h>
f24e9980 20
c16e7869
SW
21#define OSD_OP_FRONT_LEN 4096
22#define OSD_OPREPLY_FRONT_LEN 512
0d59ab81 23
9e32789f 24static const struct ceph_connection_operations osd_con_ops;
f24e9980 25
f9d25199 26static void __send_queued(struct ceph_osd_client *osdc);
6f6c7006 27static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
a40c4f10
YS
28static void __register_request(struct ceph_osd_client *osdc,
29 struct ceph_osd_request *req);
30static void __unregister_linger_request(struct ceph_osd_client *osdc,
31 struct ceph_osd_request *req);
56e925b6
SW
32static void __send_request(struct ceph_osd_client *osdc,
33 struct ceph_osd_request *req);
f24e9980
SW
34
35/*
36 * Implement client access to distributed object storage cluster.
37 *
38 * All data objects are stored within a cluster/cloud of OSDs, or
39 * "object storage devices." (Note that Ceph OSDs have _nothing_ to
40 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
41 * remote daemons serving up and coordinating consistent and safe
42 * access to storage.
43 *
44 * Cluster membership and the mapping of data objects onto storage devices
45 * are described by the osd map.
46 *
47 * We keep track of pending OSD requests (read, write), resubmit
48 * requests to different OSDs when the cluster topology/data layout
49 * change, or retry the affected requests when the communications
50 * channel with an OSD is reset.
51 */
52
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * On success *plen may be reduced (via *objlen) so the extent fits in a
 * single object; *objnum/*objoff/*objlen describe the mapped extent.
 * Returns 0 or a negative errno from ceph_calc_file_object_mapping().
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		/* extent crosses an object boundary: clip to this object */
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
81
f24e9980
SW
82/*
83 * requests
84 */
/*
 * kref release callback: tear down an osd request once its last
 * reference is dropped.  Releases the request/reply messages, any
 * page vectors the request owns, and the snap context, then returns
 * the request to its mempool or frees it.
 */
void ceph_osdc_release_request(struct kref *kref)
{
	int num_pages;
	struct ceph_osd_request *req = container_of(kref,
						    struct ceph_osd_request,
						    r_kref);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		/* make sure the messenger is no longer reading into it */
		ceph_msg_revoke_incoming(req->r_reply);
		ceph_msg_put(req->r_reply);
	}

	/* only release page vectors this request was marked as owning */
	if (req->r_data_in.type == CEPH_OSD_DATA_TYPE_PAGES &&
			req->r_data_in.own_pages) {
		num_pages = calc_pages_for((u64)req->r_data_in.alignment,
						(u64)req->r_data_in.length);
		ceph_release_page_vector(req->r_data_in.pages, num_pages);
	}
	if (req->r_data_out.type == CEPH_OSD_DATA_TYPE_PAGES &&
			req->r_data_out.own_pages) {
		num_pages = calc_pages_for((u64)req->r_data_out.alignment,
						(u64)req->r_data_out.length);
		ceph_release_page_vector(req->r_data_out.pages, num_pages);
	}

	ceph_put_snap_context(req->r_snapc);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
EXPORT_SYMBOL(ceph_osdc_release_request);
68b4476b 119
3499e8a5 120struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
f24e9980 121 struct ceph_snap_context *snapc,
1b83bef2 122 unsigned int num_ops,
3499e8a5 123 bool use_mempool,
54a54007 124 gfp_t gfp_flags)
f24e9980
SW
125{
126 struct ceph_osd_request *req;
127 struct ceph_msg *msg;
1b83bef2
SW
128 size_t msg_size;
129
130 msg_size = 4 + 4 + 8 + 8 + 4+8;
131 msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
132 msg_size += 1 + 8 + 4 + 4; /* pg_t */
133 msg_size += 4 + MAX_OBJ_NAME_SIZE;
134 msg_size += 2 + num_ops*sizeof(struct ceph_osd_op);
135 msg_size += 8; /* snapid */
136 msg_size += 8; /* snap_seq */
137 msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
138 msg_size += 4;
f24e9980
SW
139
140 if (use_mempool) {
3499e8a5 141 req = mempool_alloc(osdc->req_mempool, gfp_flags);
f24e9980
SW
142 memset(req, 0, sizeof(*req));
143 } else {
3499e8a5 144 req = kzalloc(sizeof(*req), gfp_flags);
f24e9980
SW
145 }
146 if (req == NULL)
a79832f2 147 return NULL;
f24e9980 148
f24e9980
SW
149 req->r_osdc = osdc;
150 req->r_mempool = use_mempool;
68b4476b 151
415e49a9 152 kref_init(&req->r_kref);
f24e9980
SW
153 init_completion(&req->r_completion);
154 init_completion(&req->r_safe_completion);
a978fa20 155 RB_CLEAR_NODE(&req->r_node);
f24e9980 156 INIT_LIST_HEAD(&req->r_unsafe_item);
a40c4f10
YS
157 INIT_LIST_HEAD(&req->r_linger_item);
158 INIT_LIST_HEAD(&req->r_linger_osd);
935b639a 159 INIT_LIST_HEAD(&req->r_req_lru_item);
cd43045c
SW
160 INIT_LIST_HEAD(&req->r_osd_item);
161
c16e7869
SW
162 /* create reply message */
163 if (use_mempool)
164 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
165 else
166 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
b61c2763 167 OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
a79832f2 168 if (!msg) {
c16e7869 169 ceph_osdc_put_request(req);
a79832f2 170 return NULL;
c16e7869
SW
171 }
172 req->r_reply = msg;
173
0fff87ec
AE
174 req->r_data_in.type = CEPH_OSD_DATA_TYPE_NONE;
175 req->r_data_out.type = CEPH_OSD_DATA_TYPE_NONE;
d50b409f 176
c16e7869 177 /* create request message; allow space for oid */
f24e9980 178 if (use_mempool)
8f3bc053 179 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
f24e9980 180 else
b61c2763 181 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
a79832f2 182 if (!msg) {
f24e9980 183 ceph_osdc_put_request(req);
a79832f2 184 return NULL;
f24e9980 185 }
68b4476b 186
f24e9980 187 memset(msg->front.iov_base, 0, msg->front.iov_len);
3499e8a5
YS
188
189 req->r_request = msg;
3499e8a5
YS
190
191 return req;
192}
3d14c5d2 193EXPORT_SYMBOL(ceph_osdc_alloc_request);
3499e8a5 194
/*
 * Return true if @opcode is one of the osd opcodes this client knows
 * about (whether or not osd_req_encode_op() can actually encode it).
 */
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_STAT:
	case CEPH_OSD_OP_MAPEXT:
	case CEPH_OSD_OP_MASKTRUNC:
	case CEPH_OSD_OP_SPARSE_READ:
	case CEPH_OSD_OP_NOTIFY:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_ASSERT_VER:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_APPEND:
	case CEPH_OSD_OP_STARTSYNC:
	case CEPH_OSD_OP_SETTRUNC:
	case CEPH_OSD_OP_TRIMTRUNC:
	case CEPH_OSD_OP_TMAPUP:
	case CEPH_OSD_OP_TMAPPUT:
	case CEPH_OSD_OP_TMAPGET:
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_ROLLBACK:
	case CEPH_OSD_OP_WATCH:
	case CEPH_OSD_OP_OMAPGETKEYS:
	case CEPH_OSD_OP_OMAPGETVALS:
	case CEPH_OSD_OP_OMAPGETHEADER:
	case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
	case CEPH_OSD_OP_OMAPSETVALS:
	case CEPH_OSD_OP_OMAPSETHEADER:
	case CEPH_OSD_OP_OMAPCLEAR:
	case CEPH_OSD_OP_OMAPRMKEYS:
	case CEPH_OSD_OP_OMAP_CMP:
	case CEPH_OSD_OP_CLONERANGE:
	case CEPH_OSD_OP_ASSERT_SRC_VERSION:
	case CEPH_OSD_OP_SRC_CMPXATTR:
	case CEPH_OSD_OP_GETXATTR:
	case CEPH_OSD_OP_GETXATTRS:
	case CEPH_OSD_OP_CMPXATTR:
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_SETXATTRS:
	case CEPH_OSD_OP_RESETXATTRS:
	case CEPH_OSD_OP_RMXATTR:
	case CEPH_OSD_OP_PULL:
	case CEPH_OSD_OP_PUSH:
	case CEPH_OSD_OP_BALANCEREADS:
	case CEPH_OSD_OP_UNBALANCEREADS:
	case CEPH_OSD_OP_SCRUB:
	case CEPH_OSD_OP_SCRUB_RESERVE:
	case CEPH_OSD_OP_SCRUB_UNRESERVE:
	case CEPH_OSD_OP_SCRUB_STOP:
	case CEPH_OSD_OP_SCRUB_MAP:
	case CEPH_OSD_OP_WRLOCK:
	case CEPH_OSD_OP_WRUNLOCK:
	case CEPH_OSD_OP_RDLOCK:
	case CEPH_OSD_OP_RDUNLOCK:
	case CEPH_OSD_OP_UPLOCK:
	case CEPH_OSD_OP_DNLOCK:
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_PGLS:
	case CEPH_OSD_OP_PGLS_FILTER:
		return true;
	default:
		return false;
	}
}
263
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode)
{
	BUG_ON(!osd_req_opcode_valid(opcode));

	/* zero everything, then set the opcode */
	memset(op, 0, sizeof (*op));

	op->op = opcode;
}
277
278void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode,
279 u64 offset, u64 length,
280 u64 truncate_size, u32 truncate_seq)
281{
282 size_t payload_len = 0;
283
284 BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);
285
286 osd_req_op_init(op, opcode);
287
288 op->extent.offset = offset;
289 op->extent.length = length;
290 op->extent.truncate_size = truncate_size;
291 op->extent.truncate_seq = truncate_seq;
292 if (opcode == CEPH_OSD_OP_WRITE)
293 payload_len += length;
294
295 op->payload_len = payload_len;
296}
297EXPORT_SYMBOL(osd_req_op_extent_init);
298
/*
 * Initialize a class-method call op.  @class/@method/@request_data are
 * borrowed pointers; they must stay valid until the request has been
 * encoded (osd_req_encode_op() copies them into a pagelist).
 */
void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode,
			const char *class, const char *method,
			const void *request_data, size_t request_data_size)
{
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	osd_req_op_init(op, opcode);

	/* class and method name lengths are encoded as single bytes */
	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	payload_len += size;

	op->cls.indata = request_data;
	BUG_ON(request_data_size > (size_t) U32_MAX);
	op->cls.indata_len = (u32) request_data_size;
	payload_len += request_data_size;

	op->cls.argc = 0;	/* currently unused */

	op->payload_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
332
/*
 * Initialize a watch or notify-ack op.
 *
 * NOTE(review): the version is stored here already byte-swapped
 * (cpu_to_le64), and osd_req_encode_op() swaps watch.ver again when
 * encoding.  The commented-out line and the "XXX 3847" tag suggest
 * this double swap is a deliberate workaround — confirm against ceph
 * tracker issue 3847 before "fixing" it.
 */
void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode,
				u64 cookie, u64 version, int flag)
{
	BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

	osd_req_op_init(op, opcode);

	op->watch.cookie = cookie;
	/* op->watch.ver = version; */	/* XXX 3847 */
	op->watch.ver = cpu_to_le64(version);
	if (opcode == CEPH_OSD_OP_WATCH && flag)
		op->watch.flag = (u8) 1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);
347
/*
 * Encode one op from its in-memory form (@src) into the wire format
 * (@dst, little-endian).  For CEPH_OSD_OP_CALL the class/method/indata
 * are copied into a freshly allocated pagelist attached to the
 * request's outbound data.
 *
 * Returns the number of outbound data bytes this op implies (0 for
 * ops with no outbound data, and also for invalid/unsupported ops,
 * which are warned about and otherwise skipped).
 *
 * NOTE(review): the kmalloc() of the pagelist is only BUG_ON-checked,
 * and the ceph_pagelist_append() return values are ignored — any
 * append failure here would go unnoticed.
 */
static u64 osd_req_encode_op(struct ceph_osd_request *req,
			     struct ceph_osd_op *dst,
			     struct ceph_osd_req_op *src)
{
	u64 out_data_len = 0;
	struct ceph_pagelist *pagelist;

	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		if (src->op == CEPH_OSD_OP_WRITE)
			out_data_len = src->extent.length;
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		BUG_ON(!pagelist);
		ceph_pagelist_init(pagelist);

		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		ceph_pagelist_append(pagelist, src->cls.class_name,
				     src->cls.class_len);
		ceph_pagelist_append(pagelist, src->cls.method_name,
				     src->cls.method_len);
		ceph_pagelist_append(pagelist, src->cls.indata,
				     src->cls.indata_len);

		req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGELIST;
		req->r_data_out.pagelist = pagelist;
		out_data_len = pagelist->length;
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		/* watch.ver may already be le64 — see osd_req_op_watch_init */
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(src->watch.ver);
		dst->watch.flag = src->watch.flag;
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}
	dst->op = cpu_to_le16(src->op);
	dst->payload_len = cpu_to_le32(src->payload_len);

	return out_data_len;
}
414
/*
 * build new request AND message
 *
 * Encodes the full MOSDOp front into req->r_request (header version 4):
 * client_inc, osdmap epoch, flags, mtime, reassert version, oloc, pgid,
 * oid, the op array, snap information, and the attempt counter.
 * Pointers to the fields that get (re)filled at send time
 * (r_request_osdmap_epoch, r_request_flags, r_request_pool,
 * r_request_pgid, r_request_ops, r_request_attempts) are recorded as
 * the encoding walks the buffer, so the order of these steps must not
 * change.
 */
void ceph_osdc_build_request(struct ceph_osd_request *req,
			     u64 off, unsigned int num_ops,
			     struct ceph_osd_req_op *src_ops,
			     struct ceph_snap_context *snapc, u64 snap_id,
			     struct timespec *mtime)
{
	struct ceph_msg *msg = req->r_request;
	struct ceph_osd_req_op *src_op;
	void *p;
	size_t msg_size;
	int flags = req->r_flags;
	u64 data_len;
	int i;

	req->r_num_ops = num_ops;
	req->r_snapid = snap_id;
	req->r_snapc = ceph_get_snap_context(snapc);

	/* encode request */
	msg->hdr.version = cpu_to_le16(4);

	p = msg->front.iov_base;
	ceph_encode_32(&p, 1);   /* client_inc is always 1 */
	req->r_request_osdmap_epoch = p;
	p += 4;
	req->r_request_flags = p;
	p += 4;
	if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		ceph_encode_timespec(p, mtime);
	p += sizeof(struct ceph_timespec);
	req->r_request_reassert_version = p;
	p += sizeof(struct ceph_eversion); /* will get filled in */

	/* oloc: pool (8) + preferred (4) + key length (4) */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	req->r_request_pool = p;
	p += 8;
	ceph_encode_32(&p, -1);  /* preferred */
	ceph_encode_32(&p, 0);   /* key len */

	ceph_encode_8(&p, 1);
	req->r_request_pgid = p;
	p += 8 + 4;
	ceph_encode_32(&p, -1);  /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_oid_len);
	memcpy(p, req->r_oid, req->r_oid_len);
	dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len);
	p += req->r_oid_len;

	/* ops--can imply data */
	ceph_encode_16(&p, num_ops);
	src_op = src_ops;
	req->r_request_ops = p;
	data_len = 0;
	for (i = 0; i < num_ops; i++, src_op++) {
		data_len += osd_req_encode_op(req, p, src_op);
		p += sizeof(struct ceph_osd_op);
	}

	/* snaps */
	ceph_encode_64(&p, req->r_snapid);
	ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
	ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
	if (req->r_snapc) {
		for (i = 0; i < snapc->num_snaps; i++) {
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
		}
	}

	req->r_request_attempts = p;
	p += 4;

	/* data */
	if (flags & CEPH_OSD_FLAG_WRITE) {
		u16 data_off;

		/*
		 * The header "data_off" is a hint to the receiver
		 * allowing it to align received data into its
		 * buffers such that there's no need to re-copy
		 * it before writing it to disk (direct I/O).
		 */
		data_off = (u16) (off & 0xffff);
		req->r_request->hdr.data_off = cpu_to_le16(data_off);
	}
	req->r_request->hdr.data_len = cpu_to_le32(data_len);

	BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
	msg_size = p - msg->front.iov_base;
	msg->front.iov_len = msg_size;
	msg->hdr.front_len = cpu_to_le32(msg_size);

	dout("build_request msg_size was %d\n", (int)msg_size);
}
EXPORT_SYMBOL(ceph_osdc_build_request);
3499e8a5
YS
518
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * A second entry in @ops requests an extra 'startsync' op so that the
 * osd will flush data quickly.
 *
 * Returns the new request or an ERR_PTR on failure; *plen may be
 * shortened by calc_layout() to fit a single object.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen, int num_ops,
					       struct ceph_osd_req_op *ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	u32 object_size;
	u64 object_base;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	req->r_flags = flags;

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r < 0) {
		ceph_osdc_put_request(req);
		return ERR_PTR(r);
	}

	/* translate the file-relative truncate point to object-relative */
	object_size = le32_to_cpu(layout->fl_object_size);
	object_base = off - objoff;
	if (truncate_size <= object_base) {
		truncate_size = 0;
	} else {
		truncate_size -= object_base;
		if (truncate_size > object_size)
			truncate_size = object_size;
	}

	osd_req_op_extent_init(&ops[0], opcode, objoff, objlen,
				truncate_size, truncate_seq);
	/*
	 * A second op in the ops array means the caller wants to
	 * include a 'startsync' command so that the osd will flush
	 * data quickly.
	 */
	if (num_ops > 1)
		osd_req_op_init(&ops[1], CEPH_OSD_OP_STARTSYNC);

	req->r_file_layout = *layout; /* keep a copy */

	/* object name: <inode hex>.<object number, zero-padded hex> */
	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx",
		 vino.ino, objnum);
	req->r_oid_len = strlen(req->r_oid);

	return req;
}
EXPORT_SYMBOL(ceph_osdc_new_request);
f24e9980
SW
593
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 *
 * Insert @new into the tree; tids are unique, so an existing entry
 * with the same tid is a bug.  Caller holds request_mutex.
 */
static void __insert_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *new)
{
	struct rb_node **p = &osdc->requests.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_osd_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &osdc->requests);
}
618
619static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
620 u64 tid)
621{
622 struct ceph_osd_request *req;
623 struct rb_node *n = osdc->requests.rb_node;
624
625 while (n) {
626 req = rb_entry(n, struct ceph_osd_request, r_node);
627 if (tid < req->r_tid)
628 n = n->rb_left;
629 else if (tid > req->r_tid)
630 n = n->rb_right;
631 else
632 return req;
633 }
634 return NULL;
635}
636
/*
 * Find the request with the smallest tid >= @tid, or NULL if all
 * registered tids are smaller.  Caller holds request_mutex.
 */
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
		    u64 tid)
{
	struct ceph_osd_request *req;
	struct rb_node *n = osdc->requests.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_osd_request, r_node);
		if (tid < req->r_tid) {
			/* no smaller candidate left: this is the answer */
			if (!n->rb_left)
				return req;
			n = n->rb_left;
		} else if (tid > req->r_tid) {
			n = n->rb_right;
		} else {
			return req;
		}
	}
	return NULL;
}
658
6f6c7006
SW
/*
 * Resubmit requests pending on the given osd.
 *
 * Caller holds request_mutex.  If __reset_osd() fails (e.g. the osd
 * went away) nothing is requeued.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
				struct ceph_osd *osd)
{
	struct ceph_osd_request *req, *nreq;
	LIST_HEAD(resend);
	int err;

	dout("__kick_osd_requests osd%d\n", osd->o_osd);
	err = __reset_osd(osdc, osd);
	if (err)
		return;
	/*
	 * Build up a list of requests to resend by traversing the
	 * osd's list of requests.  Requests for a given object are
	 * sent in tid order, and that is also the order they're
	 * kept on this list.  Therefore all requests that are in
	 * flight will be found first, followed by all requests that
	 * have not yet been sent.  And to resend requests while
	 * preserving this order we will want to put any sent
	 * requests back on the front of the osd client's unsent
	 * list.
	 *
	 * So we build a separate ordered list of already-sent
	 * requests for the affected osd and splice it onto the
	 * front of the osd client's unsent list.  Once we've seen a
	 * request that has not yet been sent we're done.  Those
	 * requests are already sitting right where they belong.
	 */
	list_for_each_entry(req, &osd->o_requests, r_osd_item) {
		if (!req->r_sent)
			break;
		list_move_tail(&req->r_req_lru_item, &resend);
		dout("requeueing %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
		if (!req->r_linger)
			req->r_flags |= CEPH_OSD_FLAG_RETRY;
	}
	list_splice(&resend, &osdc->req_unsent);

	/*
	 * Linger requests are re-registered before sending, which
	 * sets up a new tid for each.  We add them to the unsent
	 * list at the end to keep things in tid order.
	 */
	list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
				 r_linger_osd) {
		/*
		 * reregister request prior to unregistering linger so
		 * that r_osd is preserved.
		 */
		BUG_ON(!list_empty(&req->r_req_lru_item));
		__register_request(osdc, req);
		list_add_tail(&req->r_req_lru_item, &osdc->req_unsent);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		__unregister_linger_request(osdc, req);
		dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
		     osd->o_osd);
	}
}
721
/*
 * If the osd connection drops, we need to resubmit all requests.
 *
 * Messenger fault callback: requeue everything pending on this osd
 * and kick the send queue.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__kick_osd_requests(osdc, osd);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}
741
/*
 * Track open sessions with osds.
 *
 * Allocate and initialize a ceph_osd session for osd number @onum.
 * Returns NULL on allocation failure; reference count starts at 1.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	osd->o_osd = onum;
	RB_CLEAR_NODE(&osd->o_node);
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_linger_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}
767
768static struct ceph_osd *get_osd(struct ceph_osd *osd)
769{
770 if (atomic_inc_not_zero(&osd->o_ref)) {
771 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
772 atomic_read(&osd->o_ref));
773 return osd;
774 } else {
775 dout("get_osd %p FAIL\n", osd);
776 return NULL;
777 }
778}
779
780static void put_osd(struct ceph_osd *osd)
781{
782 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
783 atomic_read(&osd->o_ref) - 1);
a255651d 784 if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
79494d1b
SW
785 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
786
27859f97 787 ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
f24e9980 788 kfree(osd);
79494d1b 789 }
f24e9980
SW
790}
791
/*
 * remove an osd from our map
 *
 * Caller holds request_mutex; the osd must have no pending requests.
 * Drops the map's reference (the final put may free the osd).
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}
804
/*
 * Tear down every osd session tracked by @osdc (client shutdown path).
 */
static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("%s %p\n", __func__, osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}
816
/*
 * Put an idle osd on the LRU list and stamp its idle expiry time.
 * Caller holds request_mutex; the osd must not already be on the LRU.
 */
static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ;
}
825
/*
 * Take an osd off the idle LRU list, if it is on it.
 * Caller holds request_mutex.
 */
static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}
832
aca420bc 833static void remove_old_osds(struct ceph_osd_client *osdc)
f5a2041b
YS
834{
835 struct ceph_osd *osd, *nosd;
836
837 dout("__remove_old_osds %p\n", osdc);
838 mutex_lock(&osdc->request_mutex);
839 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
aca420bc 840 if (time_before(jiffies, osd->lru_ttl))
f5a2041b
YS
841 break;
842 __remove_osd(osdc, osd);
843 }
844 mutex_unlock(&osdc->request_mutex);
845}
846
/*
 * reset osd connect
 *
 * Returns -ENODEV if the osd had nothing pending and was removed,
 * -EAGAIN if its address is unchanged and the connection was never
 * opened (let the messenger retry), or 0 after closing and reopening
 * the connection.  Caller holds request_mutex.
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests) &&
	    list_empty(&osd->o_linger_requests)) {
		__remove_osd(osdc, osd);

		return -ENODEV;
	}

	peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
			!ceph_con_opened(&osd->o_con)) {
		struct ceph_osd_request *req;

		dout(" osd addr hasn't changed and connection never opened,"
		     " letting msgr retry");
		/* touch each r_stamp for handle_timeout()'s benefit */
		list_for_each_entry(req, &osd->o_requests, r_osd_item)
			req->r_stamp = jiffies;

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
882
/*
 * Insert @new into the osd rbtree, keyed by osd number.  Osd numbers
 * are unique, so a duplicate is a bug.  Caller holds request_mutex.
 */
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
	struct rb_node **p = &osdc->osds.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
		if (new->o_osd < osd->o_osd)
			p = &(*p)->rb_left;
		else if (new->o_osd > osd->o_osd)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->o_node, parent, p);
	rb_insert_color(&new->o_node, &osdc->osds);
}
904
905static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
906{
907 struct ceph_osd *osd;
908 struct rb_node *n = osdc->osds.rb_node;
909
910 while (n) {
911 osd = rb_entry(n, struct ceph_osd, o_node);
912 if (o < osd->o_osd)
913 n = n->rb_left;
914 else if (o > osd->o_osd)
915 n = n->rb_right;
916 else
917 return osd;
918 }
919 return NULL;
920}
921
/*
 * Arm the osd keepalive/timeout work for one keepalive period.
 */
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->options->osd_keepalive_timeout * HZ);
}
927
/*
 * Disarm the osd timeout work (no requests remain outstanding).
 */
static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}
f24e9980
SW
932
/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 *
 * Takes a reference on @req for the tid tree.  Caller holds
 * request_mutex.
 */
static void __register_request(struct ceph_osd_client *osdc,
			       struct ceph_osd_request *req)
{
	req->r_tid = ++osdc->last_tid;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);
	osdc->num_requests++;
	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
}
951
f24e9980
SW
/*
 * called under osdc->request_mutex
 *
 * Remove @req from the tid tree, revoke its in-flight message, detach
 * it from its osd (moving an idle osd to the LRU) and drop the tree's
 * reference.  Cancels the timeout when no requests remain.  Safe to
 * call on an already-unregistered request.
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	if (RB_EMPTY_NODE(&req->r_node)) {
		dout("__unregister_request %p tid %lld not registered\n",
			req, req->r_tid);
		return;
	}

	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_msg_revoke(req->r_request);

		list_del_init(&req->r_osd_item);
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		/* keep r_osd while still linked as a linger request */
		if (list_empty(&req->r_linger_item))
			req->r_osd = NULL;
	}

	list_del_init(&req->r_req_lru_item);
	ceph_osdc_put_request(req);

	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}
/*
 * Cancel a previously queued request message
 *
 * Revokes the message from the messenger and clears r_sent so the
 * request will be (re)sent later.  Caller holds request_mutex.
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent && req->r_osd) {
		ceph_msg_revoke(req->r_request);
		req->r_sent = 0;
	}
}
1001
a40c4f10
YS
/*
 * Put @req on the client-wide linger list and, if it has a target osd,
 * on that osd's linger list too.  Caller holds osdc->request_mutex.
 */
static void __register_linger_request(struct ceph_osd_client *osdc,
				    struct ceph_osd_request *req)
{
	dout("__register_linger_request %p\n", req);
	list_add_tail(&req->r_linger_item, &osdc->req_linger);
	if (req->r_osd)
		list_add_tail(&req->r_linger_osd,
			      &req->r_osd->o_linger_requests);
}
1011
/*
 * Undo __register_linger_request(): take @req off the linger lists and
 * retire an osd that now has no work.  Caller holds osdc->request_mutex.
 */
static void __unregister_linger_request(struct ceph_osd_client *osdc,
					struct ceph_osd_request *req)
{
	dout("__unregister_linger_request %p\n", req);
	list_del_init(&req->r_linger_item);
	if (req->r_osd) {
		list_del_init(&req->r_linger_osd);

		/* idle osd (no normal or linger requests) goes on the lru */
		if (list_empty(&req->r_osd->o_requests) &&
		    list_empty(&req->r_osd->o_linger_requests)) {
			dout("moving osd to %p lru\n", req->r_osd);
			__move_osd_to_lru(osdc, req->r_osd);
		}
		/* keep r_osd if the request is still on the osd's list */
		if (list_empty(&req->r_osd_item))
			req->r_osd = NULL;
	}
}
1029
/*
 * Public wrapper: unregister a linger request and drop the reference
 * taken by ceph_osdc_set_request_linger().
 */
void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
					 struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	if (req->r_linger) {
		__unregister_linger_request(osdc, req);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
}
EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);
1041
/*
 * Mark @req as a linger request (resent across osdmap changes) and take
 * an extra reference on it.
 */
void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
				  struct ceph_osd_request *req)
{
	if (!req->r_linger) {
		dout("set_request_linger %p\n", req);
		req->r_linger = 1;
		/*
		 * caller is now responsible for calling
		 * unregister_linger_request
		 */
		ceph_osdc_get_request(req);
	}
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);
1056
f24e9980
SW
/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave on in-flight lru.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_request(struct ceph_osd_client *osdc,
			 struct ceph_osd_request *req, int force_resend)
{
	struct ceph_pg pgid;
	int acting[CEPH_PG_MAX_SIZE];
	int o = -1, num = 0;
	int err;

	dout("map_request %p tid %lld\n", req, req->r_tid);
	/* object name + layout -> placement group */
	err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap,
				ceph_file_layout_pg_pool(req->r_file_layout));
	if (err) {
		list_move(&req->r_req_lru_item, &osdc->req_notarget);
		return err;
	}
	req->r_pgid = pgid;

	/* placement group -> acting osd set; acting[0] is the primary */
	err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
	if (err > 0) {
		o = acting[0];
		num = err;
	}

	/*
	 * No remap needed when the target osd, its incarnation, and the
	 * full acting set are all unchanged (and no resend was forced),
	 * or when the request was and remains homeless.
	 */
	if ((!force_resend &&
	     req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation &&
	     req->r_num_pg_osds == num &&
	     memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
	     req->r_tid, pgid.pool, pgid.seed, o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* record full pg acting set */
	memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
	req->r_num_pg_osds = num;

	if (req->r_osd) {
		/* detach from the old target before re-homing */
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		/* first request to this osd: create struct and connect */
		err = -ENOMEM;
		req->r_osd = create_osd(osdc, o);
		if (!req->r_osd) {
			list_move(&req->r_req_lru_item, &osdc->req_notarget);
			goto out;
		}

		dout("map_request osd %p is osd%d\n", req->r_osd, o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con,
			      CEPH_ENTITY_TYPE_OSD, o,
			      &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		__remove_osd_from_lru(req->r_osd);
		list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
		list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
	} else {
		/* no up osd for this pg: park as homeless */
		list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
	}
	err = 1;   /* osd or pg changed */

out:
	return err;
}
1141
/*
 * Fill in the per-send fields of the request message and hand it to the
 * messenger.
 *
 * caller should hold map_sem (for read) and request_mutex
 */
static void __send_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	void *p;

	dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
	     req, req->r_tid, req->r_osd->o_osd, req->r_flags,
	     (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);

	/* fill in message content that changes each time we send it */
	put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
	put_unaligned_le32(req->r_flags, req->r_request_flags);
	put_unaligned_le64(req->r_pgid.pool, req->r_request_pool);
	p = req->r_request_pgid;
	ceph_encode_64(&p, req->r_pgid.pool);
	ceph_encode_32(&p, req->r_pgid.seed);
	put_unaligned_le64(1, req->r_request_attempts);  /* FIXME */
	memcpy(req->r_request_reassert_version, &req->r_reassert_version,
	       sizeof(req->r_reassert_version));

	/* r_stamp drives the keepalive logic in handle_timeout() */
	req->r_stamp = jiffies;
	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);

	ceph_msg_get(req->r_request); /* send consumes a ref */
	ceph_con_send(&req->r_osd->o_con, req->r_request);
	req->r_sent = req->r_osd->o_incarnation;
}
1172
6f6c7006
SW
/*
 * Send any requests in the queue (req_unsent).
 *
 * Caller holds osdc->request_mutex.  __send_request() moves each entry
 * to req_lru, hence the _safe iteration.
 */
static void __send_queued(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req, *tmp;

	dout("__send_queued\n");
	list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
		__send_request(osdc, req);
}
1184
f24e9980
SW
/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req;
	struct ceph_osd *osd;
	unsigned long keepalive =
		osdc->client->options->osd_keepalive_timeout * HZ;
	struct list_head slow_osds;
	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		/* req_lru is ordered by send time; stop at first fresh one */
		if (time_before(jiffies, req->r_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
}
1238
f5a2041b
YS
/*
 * Periodic work: prune osds that have been idle too long, then
 * reschedule ourselves at a quarter of the idle ttl.
 */
static void handle_osds_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client,
			     osds_timeout_work.work);
	/* poll at ttl/4 so an idle osd is noticed well within its ttl */
	unsigned long delay =
		osdc->client->options->osd_idle_ttl * HZ >> 2;

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
			      round_jiffies_relative(delay));
}
1255
25845472
SW
/*
 * Run the "safe" (on-disk commit) completion for @req: invoke the
 * optional callback first, then wake any fsync waiters.
 */
static void complete_request(struct ceph_osd_request *req)
{
	if (req->r_safe_callback)
		req->r_safe_callback(req, NULL);
	complete_all(&req->r_safe_completion);  /* fsync waiter */
}
1262
f24e9980
SW
1263/*
1264 * handle osd op reply. either call the callback if it is specified,
1265 * or do the completion to wake up the waiting thread.
1266 */
350b1c32
SW
1267static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1268 struct ceph_connection *con)
f24e9980 1269{
1b83bef2 1270 void *p, *end;
f24e9980
SW
1271 struct ceph_osd_request *req;
1272 u64 tid;
1b83bef2
SW
1273 int object_len;
1274 int numops, payload_len, flags;
0ceed5db 1275 s32 result;
1b83bef2
SW
1276 s32 retry_attempt;
1277 struct ceph_pg pg;
1278 int err;
1279 u32 reassert_epoch;
1280 u64 reassert_version;
1281 u32 osdmap_epoch;
0d5af164 1282 int already_completed;
1b83bef2 1283 int i;
f24e9980 1284
6df058c0 1285 tid = le64_to_cpu(msg->hdr.tid);
1b83bef2
SW
1286 dout("handle_reply %p tid %llu\n", msg, tid);
1287
1288 p = msg->front.iov_base;
1289 end = p + msg->front.iov_len;
1290
1291 ceph_decode_need(&p, end, 4, bad);
1292 object_len = ceph_decode_32(&p);
1293 ceph_decode_need(&p, end, object_len, bad);
1294 p += object_len;
1295
ef4859d6 1296 err = ceph_decode_pgid(&p, end, &pg);
1b83bef2 1297 if (err)
f24e9980 1298 goto bad;
1b83bef2
SW
1299
1300 ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
1301 flags = ceph_decode_64(&p);
1302 result = ceph_decode_32(&p);
1303 reassert_epoch = ceph_decode_32(&p);
1304 reassert_version = ceph_decode_64(&p);
1305 osdmap_epoch = ceph_decode_32(&p);
1306
f24e9980
SW
1307 /* lookup */
1308 mutex_lock(&osdc->request_mutex);
1309 req = __lookup_request(osdc, tid);
1310 if (req == NULL) {
1311 dout("handle_reply tid %llu dne\n", tid);
8058fd45 1312 goto bad_mutex;
f24e9980
SW
1313 }
1314 ceph_osdc_get_request(req);
1b83bef2
SW
1315
1316 dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
1317 req, result);
1318
1319 ceph_decode_need(&p, end, 4, bad);
1320 numops = ceph_decode_32(&p);
1321 if (numops > CEPH_OSD_MAX_OP)
1322 goto bad_put;
1323 if (numops != req->r_num_ops)
1324 goto bad_put;
1325 payload_len = 0;
1326 ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
1327 for (i = 0; i < numops; i++) {
1328 struct ceph_osd_op *op = p;
1329 int len;
1330
1331 len = le32_to_cpu(op->payload_len);
1332 req->r_reply_op_len[i] = len;
1333 dout(" op %d has %d bytes\n", i, len);
1334 payload_len += len;
1335 p += sizeof(*op);
1336 }
1337 if (payload_len != le32_to_cpu(msg->hdr.data_len)) {
1338 pr_warning("sum of op payload lens %d != data_len %d",
1339 payload_len, le32_to_cpu(msg->hdr.data_len));
1340 goto bad_put;
1341 }
1342
1343 ceph_decode_need(&p, end, 4 + numops * 4, bad);
1344 retry_attempt = ceph_decode_32(&p);
1345 for (i = 0; i < numops; i++)
1346 req->r_reply_op_result[i] = ceph_decode_32(&p);
f24e9980 1347
f24e9980 1348 if (!req->r_got_reply) {
95c96174 1349 unsigned int bytes;
f24e9980 1350
1b83bef2 1351 req->r_result = result;
f24e9980
SW
1352 bytes = le32_to_cpu(msg->hdr.data_len);
1353 dout("handle_reply result %d bytes %d\n", req->r_result,
1354 bytes);
1355 if (req->r_result == 0)
1356 req->r_result = bytes;
1357
1358 /* in case this is a write and we need to replay, */
1b83bef2
SW
1359 req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
1360 req->r_reassert_version.version = cpu_to_le64(reassert_version);
f24e9980
SW
1361
1362 req->r_got_reply = 1;
1363 } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
1364 dout("handle_reply tid %llu dup ack\n", tid);
34b43a56 1365 mutex_unlock(&osdc->request_mutex);
f24e9980
SW
1366 goto done;
1367 }
1368
1369 dout("handle_reply tid %llu flags %d\n", tid, flags);
1370
a40c4f10
YS
1371 if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
1372 __register_linger_request(osdc, req);
1373
f24e9980 1374 /* either this is a read, or we got the safe response */
0ceed5db
SW
1375 if (result < 0 ||
1376 (flags & CEPH_OSD_FLAG_ONDISK) ||
f24e9980
SW
1377 ((flags & CEPH_OSD_FLAG_WRITE) == 0))
1378 __unregister_request(osdc, req);
1379
0d5af164
AE
1380 already_completed = req->r_completed;
1381 req->r_completed = 1;
f24e9980 1382 mutex_unlock(&osdc->request_mutex);
0d5af164
AE
1383 if (already_completed)
1384 goto done;
f24e9980
SW
1385
1386 if (req->r_callback)
1387 req->r_callback(req, msg);
1388 else
03066f23 1389 complete_all(&req->r_completion);
f24e9980 1390
25845472
SW
1391 if (flags & CEPH_OSD_FLAG_ONDISK)
1392 complete_request(req);
f24e9980
SW
1393
1394done:
a40c4f10 1395 dout("req=%p req->r_linger=%d\n", req, req->r_linger);
f24e9980
SW
1396 ceph_osdc_put_request(req);
1397 return;
1398
1b83bef2
SW
1399bad_put:
1400 ceph_osdc_put_request(req);
8058fd45
AE
1401bad_mutex:
1402 mutex_unlock(&osdc->request_mutex);
f24e9980 1403bad:
1b83bef2
SW
1404 pr_err("corrupt osd_op_reply got %d %d\n",
1405 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
9ec7cab1 1406 ceph_msg_dump(msg);
f24e9980
SW
1407}
1408
6f6c7006 1409static void reset_changed_osds(struct ceph_osd_client *osdc)
f24e9980 1410{
f24e9980 1411 struct rb_node *p, *n;
f24e9980 1412
6f6c7006
SW
1413 for (p = rb_first(&osdc->osds); p; p = n) {
1414 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
f24e9980 1415
6f6c7006
SW
1416 n = rb_next(p);
1417 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
1418 memcmp(&osd->o_con.peer_addr,
1419 ceph_osd_addr(osdc->osdmap,
1420 osd->o_osd),
1421 sizeof(struct ceph_entity_addr)) != 0)
1422 __reset_osd(osdc, osd);
f24e9980 1423 }
422d2cb8
YS
1424}
1425
/*
 * Requeue requests whose mapping to an OSD has changed.  If requests map to
 * no osd, request a new map.
 *
 * Caller should hold map_sem for read.
 */
static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
	struct ceph_osd_request *req, *nreq;
	struct rb_node *p;
	int needmap = 0;
	int err;

	dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
	mutex_lock(&osdc->request_mutex);
	/* remap in-flight requests; fetch next node first, the body may
	 * remove req from the tree */
	for (p = rb_first(&osdc->requests); p; ) {
		req = rb_entry(p, struct ceph_osd_request, r_node);
		p = rb_next(p);

		/*
		 * For linger requests that have not yet been
		 * registered, move them to the linger list; they'll
		 * be sent to the osd in the loop below.  Unregister
		 * the request before re-registering it as a linger
		 * request to ensure the __map_request() below
		 * will decide it needs to be sent.
		 */
		if (req->r_linger && list_empty(&req->r_linger_item)) {
			dout("%p tid %llu restart on osd%d\n",
			     req, req->r_tid,
			     req->r_osd ? req->r_osd->o_osd : -1);
			__unregister_request(osdc, req);
			__register_linger_request(osdc, req);
			continue;
		}

		err = __map_request(osdc, req, force_resend);
		if (err < 0)
			continue;  /* error */
		if (req->r_osd == NULL) {
			dout("%p tid %llu maps to no osd\n", req, req->r_tid);
			needmap++;  /* request a newer map */
		} else if (err > 0) {
			if (!req->r_linger) {
				dout("%p tid %llu requeued on osd%d\n", req,
				     req->r_tid,
				     req->r_osd ? req->r_osd->o_osd : -1);
				req->r_flags |= CEPH_OSD_FLAG_RETRY;
			}
		}
	}

	/* re-kick registered linger requests (e.g. watches) */
	list_for_each_entry_safe(req, nreq, &osdc->req_linger,
				 r_linger_item) {
		dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);

		err = __map_request(osdc, req, force_resend);
		dout("__map_request returned %d\n", err);
		if (err == 0)
			continue;  /* no change and no osd was specified */
		if (err < 0)
			continue;  /* hrm! */
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd ? req->r_osd->o_osd : -1);
		__register_request(osdc, req);
		__unregister_linger_request(osdc, req);
	}
	mutex_unlock(&osdc->request_mutex);

	if (needmap) {
		dout("%d requests for down osds, need new map\n", needmap);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	}
	reset_changed_osds(osdc);
}
6f6c7006
SW
1507
1508
f24e9980
SW
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		/* only apply an incremental that follows our epoch exactly */
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  &osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
			kick_requests(osdc, 0);
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	/* if an incremental was applied, skip any full maps */
	if (newmap)
		goto done;

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			int skipped_map = 0;

			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap) {
				/* a gap in epochs forces a full resend */
				if (oldmap->epoch + 1 < newmap->epoch)
					skipped_map = 1;
				ceph_osdmap_destroy(oldmap);
			}
			kick_requests(osdc, skipped_map);
		}
		p += maplen;
		nr_maps--;
	}

done:
	/* keep the map, but let readers in while we notify and resend */
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);

	/*
	 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	__send_queued(osdc);
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}
1635
a40c4f10
YS
/*
 * watch/notify callback event infrastructure
 *
 * These callbacks are used both for watch and notify operations.
 */

/* kref release: free the event once the last reference is dropped */
static void __release_event(struct kref *kref)
{
	struct ceph_osd_event *event =
		container_of(kref, struct ceph_osd_event, kref);

	dout("__release_event %p\n", event);
	kfree(event);
}
1649
/* take an additional reference on @event */
static void get_event(struct ceph_osd_event *event)
{
	kref_get(&event->kref);
}
1654
/* drop a reference on @event; frees it when the count hits zero */
void ceph_osdc_put_event(struct ceph_osd_event *event)
{
	kref_put(&event->kref, __release_event);
}
EXPORT_SYMBOL(ceph_osdc_put_event);
1660
/*
 * Insert @new into the event rbtree, keyed by cookie.  Cookies are
 * allocated from a monotonic counter, so a duplicate is a bug.
 * Caller holds osdc->event_lock.
 */
static void __insert_event(struct ceph_osd_client *osdc,
			     struct ceph_osd_event *new)
{
	struct rb_node **p = &osdc->event_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_osd_event *event = NULL;

	while (*p) {
		parent = *p;
		event = rb_entry(parent, struct ceph_osd_event, node);
		if (new->cookie < event->cookie)
			p = &(*p)->rb_left;
		else if (new->cookie > event->cookie)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &osdc->event_tree);
}
1682
1683static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
1684 u64 cookie)
1685{
1686 struct rb_node **p = &osdc->event_tree.rb_node;
1687 struct rb_node *parent = NULL;
1688 struct ceph_osd_event *event = NULL;
1689
1690 while (*p) {
1691 parent = *p;
1692 event = rb_entry(parent, struct ceph_osd_event, node);
1693 if (cookie < event->cookie)
1694 p = &(*p)->rb_left;
1695 else if (cookie > event->cookie)
1696 p = &(*p)->rb_right;
1697 else
1698 return event;
1699 }
1700 return NULL;
1701}
1702
/*
 * Unlink @event from the event tree (if linked) and drop the tree's
 * reference.  Caller holds osdc->event_lock.
 */
static void __remove_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	if (!RB_EMPTY_NODE(&event->node)) {
		dout("__remove_event removed %p\n", event);
		rb_erase(&event->node, &osdc->event_tree);
		ceph_osdc_put_event(event);
	} else {
		dout("__remove_event didn't remove %p\n", event);
	}
}
1715
/*
 * Allocate and register a watch/notify event.
 *
 * @event_cb: invoked (from the notify workqueue) with (ver, notify_id,
 *            opcode, data) when a notification for this event arrives.
 * @data:     opaque pointer passed back to @event_cb.
 * @pevent:   on success, set to the new event; the caller owns one
 *            reference and must release it via ceph_osdc_cancel_event()
 *            or ceph_osdc_put_event().
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int ceph_osdc_create_event(struct ceph_osd_client *osdc,
			   void (*event_cb)(u64, u64, u8, void *),
			   void *data, struct ceph_osd_event **pevent)
{
	struct ceph_osd_event *event;

	event = kmalloc(sizeof(*event), GFP_NOIO);
	if (!event)
		return -ENOMEM;

	dout("create_event %p\n", event);
	event->cb = event_cb;
	event->one_shot = 0;
	event->data = data;
	event->osdc = osdc;
	INIT_LIST_HEAD(&event->osd_node);
	RB_CLEAR_NODE(&event->node);
	kref_init(&event->kref);   /* one ref for us */
	kref_get(&event->kref);    /* one ref for the caller */

	spin_lock(&osdc->event_lock);
	event->cookie = ++osdc->event_count;
	__insert_event(osdc, event);
	spin_unlock(&osdc->event_lock);

	*pevent = event;
	return 0;
}
EXPORT_SYMBOL(ceph_osdc_create_event);
1745
/*
 * Unregister @event and drop the caller's reference.  Any reference
 * held by in-flight notification work keeps the event alive until the
 * work completes.
 */
void ceph_osdc_cancel_event(struct ceph_osd_event *event)
{
	struct ceph_osd_client *osdc = event->osdc;

	dout("cancel_event %p\n", event);
	spin_lock(&osdc->event_lock);
	__remove_event(event);
	spin_unlock(&osdc->event_lock);
	ceph_osdc_put_event(event); /* caller's */
}
EXPORT_SYMBOL(ceph_osdc_cancel_event);
1757
1758
/*
 * Workqueue function: deliver one watch/notify callback outside the
 * message-dispatch path, then drop the event reference taken in
 * handle_watch_notify() and free the work item.
 */
static void do_event_work(struct work_struct *work)
{
	struct ceph_osd_event_work *event_work =
		container_of(work, struct ceph_osd_event_work, work);
	struct ceph_osd_event *event = event_work->event;
	u64 ver = event_work->ver;
	u64 notify_id = event_work->notify_id;
	u8 opcode = event_work->opcode;

	dout("do_event_work completing %p\n", event);
	event->cb(ver, notify_id, opcode, event->data);
	dout("do_event_work completed %p\n", event);
	ceph_osdc_put_event(event);
	kfree(event_work);
}
1774
1775
/*
 * Process osd watch notifications
 *
 * Decodes the notification, looks up the registered event by cookie,
 * and queues the user callback on the notify workqueue (callbacks must
 * not run in the message-dispatch context).
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p, *end;
	u8 proto_ver;
	u64 cookie, ver, notify_id;
	u8 opcode;
	struct ceph_osd_event *event;
	struct ceph_osd_event_work *event_work;

	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	ceph_decode_64_safe(&p, end, ver, bad);
	ceph_decode_64_safe(&p, end, notify_id, bad);

	spin_lock(&osdc->event_lock);
	event = __find_event(osdc, cookie);
	if (event) {
		BUG_ON(event->one_shot);
		/* ref for the queued work; dropped in do_event_work() */
		get_event(event);
	}
	spin_unlock(&osdc->event_lock);
	dout("handle_watch_notify cookie %lld ver %lld event %p\n",
	     cookie, ver, event);
	if (event) {
		event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
		if (!event_work) {
			dout("ERROR: could not allocate event_work\n");
			goto done_err;
		}
		INIT_WORK(&event_work->work, do_event_work);
		event_work->event = event;
		event_work->ver = ver;
		event_work->notify_id = notify_id;
		event_work->opcode = opcode;
		/*
		 * NOTE(review): if queue_work() ever failed here,
		 * event_work would be leaked; in practice a freshly
		 * initialized work item is always queued.
		 */
		if (!queue_work(osdc->notify_wq, &event_work->work)) {
			dout("WARNING: failed to queue notify event work\n");
			goto done_err;
		}
	}

	return;

done_err:
	ceph_osdc_put_event(event);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
	return;
}
1834
70636773
AE
/*
 * Attach a request's data payload (pages, pagelist, or bio) to the
 * given message, according to the osd_data type.  A type of NONE means
 * no payload; any other type is a bug.
 */
static void ceph_osdc_msg_data_set(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(osd_data->length > (u64) SIZE_MAX);
		if (osd_data->length)
			ceph_msg_data_set_pages(msg, osd_data->pages,
				osd_data->length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!osd_data->pagelist->length);
		ceph_msg_data_set_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_set_bio(msg, osd_data->bio, osd_data->bio_length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}
1854
/*
 * Register request, send initial attempt.
 *
 * @nofail: if true, a mapping failure is swallowed (rc forced to 0) and
 *          the request is left registered to be retried on a map change.
 *
 * Returns 0 on success or when retry is pending, or a negative error.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	/* Set up response incoming data and request outgoing data fields */

	ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in);
	ceph_osdc_msg_data_set(req->r_request, &req->r_data_out);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	__register_request(osdc, req);
	WARN_ON(req->r_sent);
	rc = __map_request(osdc, req, 0);
	if (rc < 0) {
		if (nofail) {
			dout("osdc_start_request failed map, "
				" will retry %lld\n", req->r_tid);
			rc = 0;
		}
		goto out_unlock;
	}
	if (req->r_osd == NULL) {
		/* no up osd for this pg; ask the monitor for a newer map */
		dout("send_request %p no up osds in pg\n", req);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	} else {
		__send_queued(osdc);
	}
	rc = 0;
out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
f24e9980
SW
1895
/*
 * wait for a request to complete
 *
 * Returns the request result, or a negative error if the wait was
 * interrupted (in which case the request is canceled and unregistered).
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	int rc;

	rc = wait_for_completion_interruptible(&req->r_completion);
	if (rc < 0) {
		/* interrupted: tear the request down */
		mutex_lock(&osdc->request_mutex);
		__cancel_request(req);
		__unregister_request(osdc, req);
		mutex_unlock(&osdc->request_mutex);
		/* release anyone waiting on the safe completion as well */
		complete_request(req);
		dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
		return rc;
	}

	dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
	return req->r_result;
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
f24e9980
SW
1919
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 *
 * Snapshots last_tid up front and only waits on writes with tids at or
 * below it, so requests submitted after the call doesn't block us.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		/* reads don't need to reach disk; skip them */
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		/* hold a ref and drop the mutex while we sleep */
		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
f24e9980
SW
1953
1954/*
1955 * init, shutdown
1956 */
1957int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
1958{
1959 int err;
1960
1961 dout("init\n");
1962 osdc->client = client;
1963 osdc->osdmap = NULL;
1964 init_rwsem(&osdc->map_sem);
1965 init_completion(&osdc->map_waiters);
1966 osdc->last_requested_map = 0;
1967 mutex_init(&osdc->request_mutex);
f24e9980
SW
1968 osdc->last_tid = 0;
1969 osdc->osds = RB_ROOT;
f5a2041b 1970 INIT_LIST_HEAD(&osdc->osd_lru);
f24e9980 1971 osdc->requests = RB_ROOT;
422d2cb8 1972 INIT_LIST_HEAD(&osdc->req_lru);
6f6c7006
SW
1973 INIT_LIST_HEAD(&osdc->req_unsent);
1974 INIT_LIST_HEAD(&osdc->req_notarget);
a40c4f10 1975 INIT_LIST_HEAD(&osdc->req_linger);
f24e9980
SW
1976 osdc->num_requests = 0;
1977 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
f5a2041b 1978 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
a40c4f10
YS
1979 spin_lock_init(&osdc->event_lock);
1980 osdc->event_tree = RB_ROOT;
1981 osdc->event_count = 0;
f5a2041b
YS
1982
1983 schedule_delayed_work(&osdc->osds_timeout_work,
3d14c5d2 1984 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
f24e9980 1985
5f44f142 1986 err = -ENOMEM;
f24e9980
SW
1987 osdc->req_mempool = mempool_create_kmalloc_pool(10,
1988 sizeof(struct ceph_osd_request));
1989 if (!osdc->req_mempool)
5f44f142 1990 goto out;
f24e9980 1991
d50b409f
SW
1992 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
1993 OSD_OP_FRONT_LEN, 10, true,
4f48280e 1994 "osd_op");
f24e9980 1995 if (err < 0)
5f44f142 1996 goto out_mempool;
d50b409f 1997 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4f48280e
SW
1998 OSD_OPREPLY_FRONT_LEN, 10, true,
1999 "osd_op_reply");
c16e7869
SW
2000 if (err < 0)
2001 goto out_msgpool;
a40c4f10
YS
2002
2003 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
2004 if (IS_ERR(osdc->notify_wq)) {
2005 err = PTR_ERR(osdc->notify_wq);
2006 osdc->notify_wq = NULL;
2007 goto out_msgpool;
2008 }
f24e9980 2009 return 0;
5f44f142 2010
c16e7869
SW
2011out_msgpool:
2012 ceph_msgpool_destroy(&osdc->msgpool_op);
5f44f142
SW
2013out_mempool:
2014 mempool_destroy(osdc->req_mempool);
2015out:
2016 return err;
f24e9980
SW
2017}
2018
/*
 * Tear down an osd client: stop deferred work, then free maps,
 * per-osd state, and the message/request pools.  Teardown order
 * matters: the notify workqueue and delayed work are quiesced first
 * so nothing touches the structures being freed below.
 */
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	/* drain pending watch/notify callbacks before destroying the wq */
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);
	if (osdc->osdmap) {
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
2034
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	struct ceph_osd_data *osd_data;
	struct ceph_osd_req_op op;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	/* new_request may trim *plen to the object/stripe boundary */
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, &op,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL);

	/* it may be a short read due to an object boundary */

	/* attach the caller's pages as the incoming-data buffer */
	osd_data = &req->r_data_in;
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = *plen;
	osd_data->alignment = page_align;

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, osd_data->length, page_align);

	/* synchronous: submit, then block until the reply arrives */
	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);
f24e9980
SW
2081
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	struct ceph_osd_data *osd_data;
	struct ceph_osd_req_op op;
	int rc = 0;
	/* offset of the data within the first page */
	int page_align = off & ~PAGE_MASK;

	BUG_ON(vino.snap != CEPH_NOSNAP);	/* snapshots aren't writeable */
	/* new_request may trim len to the object/stripe boundary */
	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, &op,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ceph_osdc_build_request(req, off, 1, &op, snapc, CEPH_NOSNAP, mtime);

	/* it may be a short write due to an object boundary */
	/* attach the caller's pages as the outgoing-data buffer */
	osd_data = &req->r_data_out;
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = len;
	osd_data->alignment = page_align;
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, osd_data->length);

	/* synchronous: submit, then block until the write is acked */
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	/* on success report the (possibly shortened) byte count */
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
f24e9980
SW
2129
2130/*
2131 * handle incoming message
2132 */
2133static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2134{
2135 struct ceph_osd *osd = con->private;
32c895e7 2136 struct ceph_osd_client *osdc;
f24e9980
SW
2137 int type = le16_to_cpu(msg->hdr.type);
2138
2139 if (!osd)
4a32f93d 2140 goto out;
32c895e7 2141 osdc = osd->o_osdc;
f24e9980
SW
2142
2143 switch (type) {
2144 case CEPH_MSG_OSD_MAP:
2145 ceph_osdc_handle_map(osdc, msg);
2146 break;
2147 case CEPH_MSG_OSD_OPREPLY:
350b1c32 2148 handle_reply(osdc, msg, con);
f24e9980 2149 break;
a40c4f10
YS
2150 case CEPH_MSG_WATCH_NOTIFY:
2151 handle_watch_notify(osdc, msg);
2152 break;
f24e9980
SW
2153
2154 default:
2155 pr_err("received unknown message type %d %s\n", type,
2156 ceph_msg_type_name(type));
2157 }
4a32f93d 2158out:
f24e9980
SW
2159 ceph_msg_put(msg);
2160}
2161
/*
 * lookup and return message for incoming reply. set up reply message
 * pages.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m;
	struct ceph_osd_request *req;
	int front = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid;

	tid = le64_to_cpu(hdr->tid);
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (!req) {
		/* no matching request: tell the caller to drop the reply */
		*skip = 1;
		m = NULL;
		dout("get_reply unknown tid %llu from osd%d\n", tid,
		     osd->o_osd);
		goto out;
	}

	/*
	 * The preallocated reply may still be claimed by a previous
	 * (resent) incarnation of this request on another connection;
	 * revoke it before reusing it here.
	 */
	if (req->r_reply->con)
		dout("%s revoking msg %p from old con %p\n", __func__,
		     req->r_reply, req->r_reply->con);
	ceph_msg_revoke_incoming(req->r_reply);

	if (front > req->r_reply->front.iov_len) {
		/* reply front is bigger than expected: allocate a
		 * replacement message large enough to hold it */
		pr_warning("get_reply front %d > preallocated %d\n",
			   front, (int)req->r_reply->front.iov_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
		if (!m)
			/* NOTE(review): returns NULL without setting *skip
			 * on this path — presumably callers treat NULL as
			 * an allocation failure; verify against caller */
			goto out;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}
	/* caller gets its own reference to the reply message */
	m = ceph_msg_get(req->r_reply);

	if (data_len > 0) {
		struct ceph_osd_data *osd_data = &req->r_data_in;

		if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
			/* reply carries more data than the pages we set
			 * aside can hold: drop it rather than overrun */
			if (osd_data->pages &&
				unlikely(osd_data->length < data_len)) {

				pr_warning("tid %lld reply has %d bytes "
					"we had only %llu bytes ready\n",
					tid, data_len, osd_data->length);
				*skip = 1;
				ceph_msg_put(m);
				m = NULL;
				goto out;
			}
		}
	}
	*skip = 0;
	dout("get_reply tid %lld %p\n", tid, m);

out:
	mutex_unlock(&osdc->request_mutex);
	return m;

}
2230
2231static struct ceph_msg *alloc_msg(struct ceph_connection *con,
2232 struct ceph_msg_header *hdr,
2233 int *skip)
2234{
2235 struct ceph_osd *osd = con->private;
2236 int type = le16_to_cpu(hdr->type);
2237 int front = le32_to_cpu(hdr->front_len);
2238
1c20f2d2 2239 *skip = 0;
5b3a4db3
SW
2240 switch (type) {
2241 case CEPH_MSG_OSD_MAP:
a40c4f10 2242 case CEPH_MSG_WATCH_NOTIFY:
b61c2763 2243 return ceph_msg_new(type, front, GFP_NOFS, false);
5b3a4db3
SW
2244 case CEPH_MSG_OSD_OPREPLY:
2245 return get_reply(con, hdr, skip);
2246 default:
2247 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
2248 osd->o_osd);
2249 *skip = 1;
2250 return NULL;
2251 }
f24e9980
SW
2252}
2253
2254/*
2255 * Wrappers to refcount containing ceph_osd struct
2256 */
2257static struct ceph_connection *get_osd_con(struct ceph_connection *con)
2258{
2259 struct ceph_osd *osd = con->private;
2260 if (get_osd(osd))
2261 return con;
2262 return NULL;
2263}
2264
2265static void put_osd_con(struct ceph_connection *con)
2266{
2267 struct ceph_osd *osd = con->private;
2268 put_osd(osd);
2269}
2270
/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	/* handshake state lives in the ceph_osd, not here */
	struct ceph_auth_handshake *auth = &o->o_auth;

	/* caller wants a fresh authorizer: throw away any cached one */
	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		/* reuse the cached authorizer, refreshing it if needed */
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						     auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
2305
2306
2307static int verify_authorizer_reply(struct ceph_connection *con, int len)
2308{
2309 struct ceph_osd *o = con->private;
2310 struct ceph_osd_client *osdc = o->o_osdc;
2311 struct ceph_auth_client *ac = osdc->client->monc.auth;
2312
27859f97 2313 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
4e7a5dcd
SW
2314}
2315
9bd2e6f8
SW
2316static int invalidate_authorizer(struct ceph_connection *con)
2317{
2318 struct ceph_osd *o = con->private;
2319 struct ceph_osd_client *osdc = o->o_osdc;
2320 struct ceph_auth_client *ac = osdc->client->monc.auth;
2321
27859f97 2322 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
9bd2e6f8
SW
2323 return ceph_monc_validate_auth(&osdc->client->monc);
2324}
4e7a5dcd 2325
/* Connection callbacks the messenger invokes for osd connections. */
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,			/* take a ref on the owning osd */
	.put = put_osd_con,			/* drop that ref */
	.dispatch = dispatch,			/* handle an incoming message */
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,			/* allocate incoming messages */
	.fault = osd_reset,			/* connection error/reset */
};