libceph: allocate dummy osdmap in ceph_osdc_init()
net/ceph/osd_client.c [linux-2.6-block.git]

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

static void __send_queued(struct ceph_osd_client *osdc);
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
static void __register_request(struct ceph_osd_client *osdc,
                               struct ceph_osd_request *req);
static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req);
static void __unregister_linger_request(struct ceph_osd_client *osdc,
                                        struct ceph_osd_request *req);
static void __enqueue_request(struct ceph_osd_request *req);

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

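/*
 * A hedged sketch of the typical request lifecycle built from the
 * helpers in this file; submission and completion handling happen via
 * osd_client entry points outside this listing, and "pages", "len",
 * "truncate_seq" etc. are placeholders:
 *
 *      req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *                                  CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *                                  NULL, truncate_seq, truncate_size,
 *                                  false);
 *      osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *                                       false, false);
 *      ...submit and wait on r_completion...
 *      ceph_osdc_put_request(req);
 */
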
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        /* object extent? */
        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}
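
/*
 * Worked example (hedged, assuming simple striping: stripe_unit ==
 * object_size == 4M, stripe_count == 1): off=6M, *plen=4M maps to
 * objnum=1, objoff=2M, objlen=2M, and *plen is shortened to 2M; the
 * caller must issue the remaining 2M against the next object as a
 * separate request.
 */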

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                        struct bio *bio, size_t bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio = bio;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        unsigned int which, struct bio *bio, size_t bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
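
/*
 * Hedged sketch of wiring up the three data buffers of a
 * CEPH_OSD_OP_CALL op; the class/method names and page vectors are
 * illustrative only:
 *
 *      osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "lock");
 *      osd_req_op_cls_request_data_pages(req, 0, in_pages, in_len, 0,
 *                                        false, false);
 *      osd_req_op_cls_response_data_pages(req, 0, out_pages, out_len, 0,
 *                                         false, false);
 */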

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oid_destroy(&t->target_oid);
}

/*
 * requests
 */
static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!list_empty(&req->r_req_lru_item));
        WARN_ON(!list_empty(&req->r_osd_item));
        WARN_ON(!list_empty(&req->r_linger_item));
        WARN_ON(!list_empty(&req->r_linger_osd_item));
        WARN_ON(req->r_osd);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply) {
                ceph_msg_revoke_incoming(req->r_reply);
                ceph_msg_put(req->r_reply);
        }

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             atomic_read(&req->r_kref.refcount));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     atomic_read(&req->r_kref.refcount));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
                              gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        RB_CLEAR_NODE(&req->r_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);
        INIT_LIST_HEAD(&req->r_linger_item);
        INIT_LIST_HEAD(&req->r_linger_osd_item);
        INIT_LIST_HEAD(&req->r_req_lru_item);
        INIT_LIST_HEAD(&req->r_osd_item);

        target_init(&req->r_t);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
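
/*
 * Minimal (hedged) sketch of building a one-op request by hand instead
 * of via ceph_osdc_new_request(); "object_name" is a placeholder, and
 * r_base_oloc.pool must also be set before the request can be mapped
 * to an OSD:
 *
 *      req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *      if (!req)
 *              return -ENOMEM;
 *      osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *      ceph_oid_printf(&req->r_base_oid, "%s", object_name);
 *      ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 */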

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(ceph_oid_empty(&req->r_base_oid));

        /* create request message */
        msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
        msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
        msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
        msg_size += 1 + 8 + 4 + 4; /* pgid */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4; /* retry_attempt */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
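
/*
 * Hedged example: a 4096-byte write extent op at offset 0 with no
 * truncation interval; for writes the extent length is also charged
 * to indata_len:
 *
 *      osd_req_op_extent_init(req, 0, CEPH_OSD_OP_WRITE, 0, 4096, 0, 0);
 */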

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
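
/*
 * Hedged example, continuing the write above: shrink the extent to
 * 1024 bytes before the request is sent (lengths may only shrink,
 * never grow):
 *
 *      osd_req_op_extent_update(req, 0, 1024);
 */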

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);
        ceph_pagelist_init(pagelist);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        ceph_pagelist_init(pagelist);

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
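
/*
 * Hedged sketch: guard a request with an xattr comparison.  The
 * CEPH_OSD_CMPXATTR_* values are assumed from the rados headers;
 * "parent", value and value_len are placeholders:
 *
 *      ret = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
 *                                  "parent", value, value_len,
 *                                  CEPH_OSD_CMPXATTR_OP_EQ,
 *                                  CEPH_OSD_CMPXATTR_MODE_STRING);
 */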

void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 cookie, u64 version, int flag)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);

        BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);

        op->watch.cookie = cookie;
        op->watch.ver = version;
        if (opcode == CEPH_OSD_OP_WATCH && flag)
                op->watch.flag = (u8)1;
}
EXPORT_SYMBOL(osd_req_op_watch_init);
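
/*
 * Hedged example: establish a watch with a caller-chosen cookie
 * ("cookie" and "ver" are placeholders); a nonzero flag arms the
 * watch, while zero is assumed to tear it down (per rbd's usage):
 *
 *      osd_req_op_watch_init(req, 0, CEPH_OSD_OP_WATCH, cookie, ver, 1);
 */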

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
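
/*
 * Hedged example, rbd-style: hint that objects will be object_size
 * bytes and typically written a full object at a time ("object_size"
 * is a placeholder):
 *
 *      osd_req_op_alloc_hint_init(req, 0, object_size, object_size);
 */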

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(src->watch.ver);
                dst->watch.flag = src->watch.flag;
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = le32_to_cpu(layout->fl_object_size);
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_flags = flags;
        req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
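
/*
 * DEFINE_RB_FUNCS() above generates insert_request(), erase_request()
 * and a lookup helper keyed on ->r_tid (hedged: names follow the
 * DEFINE_RB_FUNCS convention, cf. lookup_osd() further down), e.g.:
 *
 *      insert_request(&osdc->requests, req);
 *      erase_request(&osdc->requests, req);
 */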

static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
                    u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid) {
                        if (!n->rb_left)
                                return req;
                        n = n->rb_left;
                } else if (tid > req->r_tid) {
                        n = n->rb_right;
                } else {
                        return req;
                }
        }
        return NULL;
}

static void __kick_linger_request(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_osd *osd = req->r_osd;

        /*
         * Linger requests need to be resent with a new tid to avoid
         * the dup op detection logic on the OSDs.  Achieve this with
         * a re-register dance instead of open-coding.
         */
        ceph_osdc_get_request(req);
        if (!list_empty(&req->r_linger_item))
                __unregister_linger_request(osdc, req);
        else
                __unregister_request(osdc, req);
        __register_request(osdc, req);
        ceph_osdc_put_request(req);

        /*
         * Unless request has been registered as both normal and
         * lingering, __unregister{,_linger}_request clears r_osd.
         * However, here we need to preserve r_osd to make sure we
         * requeue on the same OSD.
         */
        WARN_ON(req->r_osd || !osd);
        req->r_osd = osd;

        dout("%s requeueing %p tid %llu\n", __func__, req, req->r_tid);
        __enqueue_request(req);
}

/*
 * Resubmit requests pending on the given osd.
 */
static void __kick_osd_requests(struct ceph_osd_client *osdc,
                                struct ceph_osd *osd)
{
        struct ceph_osd_request *req, *nreq;
        LIST_HEAD(resend);
        LIST_HEAD(resend_linger);
        int err;

        dout("%s osd%d\n", __func__, osd->o_osd);
        err = __reset_osd(osdc, osd);
        if (err)
                return;

        /*
         * Build up a list of requests to resend by traversing the
         * osd's list of requests.  Requests for a given object are
         * sent in tid order, and that is also the order they're
         * kept on this list.  Therefore all requests that are in
         * flight will be found first, followed by all requests that
         * have not yet been sent.  And to resend requests while
         * preserving this order we will want to put any sent
         * requests back on the front of the osd client's unsent
         * list.
         *
         * So we build a separate ordered list of already-sent
         * requests for the affected osd and splice it onto the
         * front of the osd client's unsent list.  Once we've seen a
         * request that has not yet been sent we're done.  Those
         * requests are already sitting right where they belong.
         */
        list_for_each_entry(req, &osd->o_requests, r_osd_item) {
                if (!req->r_sent)
                        break;

                if (!req->r_linger) {
                        dout("%s requeueing %p tid %llu\n", __func__, req,
                             req->r_tid);
                        list_move_tail(&req->r_req_lru_item, &resend);
                        req->r_flags |= CEPH_OSD_FLAG_RETRY;
                } else {
                        list_move_tail(&req->r_req_lru_item, &resend_linger);
                }
        }
        list_splice(&resend, &osdc->req_unsent);

        /*
         * Both registered and not yet registered linger requests are
         * enqueued with a new tid on the same OSD.  We add/move them
         * to req_unsent/o_requests at the end to keep things in tid
         * order.
         */
        list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
                                 r_linger_osd_item) {
                WARN_ON(!list_empty(&req->r_req_lru_item));
                __kick_linger_request(req);
        }

        list_for_each_entry_safe(req, nreq, &resend_linger, r_req_lru_item)
                __kick_linger_request(req);
}

/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;

        if (!osd)
                return;
        dout("osd_reset osd%d\n", osd->o_osd);
        osdc = osd->o_osdc;
        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        __kick_osd_requests(osdc, osd);
        __send_queued(osdc);
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        osd = kzalloc(sizeof(*osd), GFP_NOFS);
        if (!osd)
                return NULL;

        atomic_set(&osd->o_ref, 1);
        osd->o_osdc = osdc;
        osd->o_osd = onum;
        RB_CLEAR_NODE(&osd->o_node);
        INIT_LIST_HEAD(&osd->o_requests);
        INIT_LIST_HEAD(&osd->o_linger_requests);
        INIT_LIST_HEAD(&osd->o_osd_lru);
        osd->o_incarnation = 1;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        INIT_LIST_HEAD(&osd->o_keepalive_item);
        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (atomic_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
                     atomic_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref)) {
                if (osd->o_auth.authorizer)
                        ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

/*
 * remove an osd from our map
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
        WARN_ON(!list_empty(&osd->o_requests));
        WARN_ON(!list_empty(&osd->o_linger_requests));

        list_del_init(&osd->o_osd_lru);
        erase_osd(&osdc->osds, osd);
}

static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        dout("%s %p osd%d\n", __func__, osd, osd->o_osd);

        if (!RB_EMPTY_NODE(&osd->o_node)) {
                ceph_con_close(&osd->o_con);
                __remove_osd(osdc, osd);
                put_osd(osd);
        }
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                              struct ceph_osd *osd)
{
        dout("%s %p\n", __func__, osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc,
                                  struct ceph_osd *osd)
{
        dout("%s %p\n", __func__, osd);

        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests))
                __move_osd_to_lru(osdc, osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        dout("__remove_osd_from_lru %p\n", osd);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
}

/*
 * reset osd connect
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests)) {
                remove_osd(osdc, osd);
                return -ENODEV;
        }

        peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct ceph_osd_request *req;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                list_for_each_entry(req, &osd->o_requests, r_osd_item)
                        req->r_stamp = jiffies;

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}

/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void __register_request(struct ceph_osd_client *osdc,
                               struct ceph_osd_request *req)
{
        req->r_tid = ++osdc->last_tid;
        req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        insert_request(&osdc->requests, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;
}

/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req)
{
        if (RB_EMPTY_NODE(&req->r_node)) {
                dout("__unregister_request %p tid %lld not registered\n",
                        req, req->r_tid);
                return;
        }

        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        erase_request(&osdc->requests, req);
        osdc->num_requests--;

        if (req->r_osd) {
                /* make sure the original request isn't in flight. */
                ceph_msg_revoke(req->r_request);

                list_del_init(&req->r_osd_item);
                maybe_move_osd_to_lru(osdc, req->r_osd);
                if (list_empty(&req->r_linger_osd_item))
                        req->r_osd = NULL;
        }

        list_del_init(&req->r_req_lru_item);
        ceph_osdc_put_request(req);
}

/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
        if (req->r_sent && req->r_osd) {
                ceph_msg_revoke(req->r_request);
                req->r_sent = 0;
        }
}

static void __register_linger_request(struct ceph_osd_client *osdc,
                                    struct ceph_osd_request *req)
{
        dout("%s %p tid %llu\n", __func__, req, req->r_tid);
        WARN_ON(!req->r_linger);

        ceph_osdc_get_request(req);
        list_add_tail(&req->r_linger_item, &osdc->req_linger);
        if (req->r_osd)
                list_add_tail(&req->r_linger_osd_item,
                              &req->r_osd->o_linger_requests);
}

static void __unregister_linger_request(struct ceph_osd_client *osdc,
                                        struct ceph_osd_request *req)
{
        WARN_ON(!req->r_linger);

        if (list_empty(&req->r_linger_item)) {
                dout("%s %p tid %llu not registered\n", __func__, req,
                     req->r_tid);
                return;
        }

        dout("%s %p tid %llu\n", __func__, req, req->r_tid);
        list_del_init(&req->r_linger_item);

        if (req->r_osd) {
                list_del_init(&req->r_linger_osd_item);
                maybe_move_osd_to_lru(osdc, req->r_osd);
                if (list_empty(&req->r_osd_item))
                        req->r_osd = NULL;
        }
        ceph_osdc_put_request(req);
}

void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
                                  struct ceph_osd_request *req)
{
        if (!req->r_linger) {
                dout("set_request_linger %p\n", req);
                req->r_linger = 1;
        }
}
EXPORT_SYMBOL(ceph_osdc_set_request_linger);

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
        return pi->flags & CEPH_POOL_FLAG_FULL;
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 *
 * Caller should hold map_sem for read.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
{
        bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
                       ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);

        WARN_ON(pi->id != t->base_oloc.pool);
        return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
               (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
}

enum calc_target_result {
        CALC_TARGET_NO_ACTION = 0,
        CALC_TARGET_NEED_RESEND,
        CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
                                           struct ceph_osd_request_target *t,
                                           u32 *last_force_resend,
                                           bool any_change)
{
        struct ceph_pg_pool_info *pi;
        struct ceph_pg pgid, last_pgid;
        struct ceph_osds up, acting;
        bool force_resend = false;
        bool need_check_tiering = false;
        bool need_resend = false;
        bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
                                             CEPH_OSDMAP_SORTBITWISE);
        enum calc_target_result ct_res;
        int ret;

        pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
        if (!pi) {
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }

        if (osdc->osdmap->epoch == pi->last_force_request_resend) {
                if (last_force_resend &&
                    *last_force_resend < pi->last_force_request_resend) {
                        *last_force_resend = pi->last_force_request_resend;
                        force_resend = true;
                } else if (!last_force_resend) {
                        force_resend = true;
                }
        }
        if (ceph_oid_empty(&t->target_oid) || force_resend) {
                ceph_oid_copy(&t->target_oid, &t->base_oid);
                need_check_tiering = true;
        }
        if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
                ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
                need_check_tiering = true;
        }

        if (need_check_tiering &&
            (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
                if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
                        t->target_oloc.pool = pi->read_tier;
                if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
                        t->target_oloc.pool = pi->write_tier;
        }

        ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
                                        &t->target_oloc, &pgid);
        if (ret) {
                WARN_ON(ret != -ENOENT);
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }
        last_pgid.pool = pgid.pool;
        last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

        ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
        if (any_change &&
            ceph_is_new_interval(&t->acting,
                                 &acting,
                                 &t->up,
                                 &up,
                                 t->size,
                                 pi->size,
                                 t->min_size,
                                 pi->min_size,
                                 t->pg_num,
                                 pi->pg_num,
                                 t->sort_bitwise,
                                 sort_bitwise,
                                 &last_pgid))
                force_resend = true;

        if (t->paused && !target_should_be_paused(osdc, t, pi)) {
                t->paused = false;
                need_resend = true;
        }

        if (ceph_pg_compare(&t->pgid, &pgid) ||
            ceph_osds_changed(&t->acting, &acting, any_change) ||
            force_resend) {
                t->pgid = pgid; /* struct */
                ceph_osds_copy(&t->acting, &acting);
                ceph_osds_copy(&t->up, &up);
                t->size = pi->size;
                t->min_size = pi->min_size;
                t->pg_num = pi->pg_num;
                t->pg_num_mask = pi->pg_num_mask;
                t->sort_bitwise = sort_bitwise;

                t->osd = acting.primary;
                need_resend = true;
        }

        ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
out:
        dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
        return ct_res;
}

static void __enqueue_request(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;

        dout("%s %p tid %llu to osd%d\n", __func__, req, req->r_tid,
             req->r_osd ? req->r_osd->o_osd : -1);

        if (req->r_osd) {
                __remove_osd_from_lru(req->r_osd);
                list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
                list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
        } else {
                list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
        }
}

/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.  Move the request to the appropriate list
 * (unsent, homeless) or leave it on the in-flight lru.
1399  *
1400  * Return 0 if unchanged, 1 if changed, or negative on error.
1401  *
1402  * Caller should hold map_sem for read and request_mutex.
1403  */
1404 static int __map_request(struct ceph_osd_client *osdc,
1405                          struct ceph_osd_request *req, int force_resend)
1406 {
1407         enum calc_target_result ct_res;
1408         int err;
1409
1410         dout("map_request %p tid %lld\n", req, req->r_tid);
1411
1412         ct_res = calc_target(osdc, &req->r_t, NULL, force_resend);
1413         switch (ct_res) {
1414         case CALC_TARGET_POOL_DNE:
1415                 list_move(&req->r_req_lru_item, &osdc->req_notarget);
1416                 return -EIO;
1417         case CALC_TARGET_NO_ACTION:
1418                 return 0;  /* no change */
1419         default:
1420                 BUG_ON(ct_res != CALC_TARGET_NEED_RESEND);
1421         }
1422
1423         dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
1424              req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed, req->r_t.osd,
1425              req->r_osd ? req->r_osd->o_osd : -1);
1426
1427         if (req->r_osd) {
1428                 __cancel_request(req);
1429                 list_del_init(&req->r_osd_item);
1430                 list_del_init(&req->r_linger_osd_item);
1431                 req->r_osd = NULL;
1432         }
1433
1434         req->r_osd = lookup_osd(&osdc->osds, req->r_t.osd);
1435         if (!req->r_osd && req->r_t.osd >= 0) {
1436                 err = -ENOMEM;
1437                 req->r_osd = create_osd(osdc, req->r_t.osd);
1438                 if (!req->r_osd) {
1439                         list_move(&req->r_req_lru_item, &osdc->req_notarget);
1440                         goto out;
1441                 }
1442
1443                 dout("map_request osd %p is osd%d\n", req->r_osd,
1444                      req->r_osd->o_osd);
1445                 insert_osd(&osdc->osds, req->r_osd);
1446
1447                 ceph_con_open(&req->r_osd->o_con,
1448                               CEPH_ENTITY_TYPE_OSD, req->r_osd->o_osd,
1449                               &osdc->osdmap->osd_addr[req->r_osd->o_osd]);
1450         }
1451
1452         __enqueue_request(req);
1453         err = 1;   /* osd or pg changed */
1454
1455 out:
1456         return err;
1457 }
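
     /*
      * Note on the return convention, as relied on by the callers in
      * this file: negative means the mapping attempt failed (target
      * pool gone, or osd struct allocation failed) and the request was
      * parked on req_notarget; 0 means the mapping is unchanged; 1
      * means the osd or pg changed and the request was requeued via
      * __enqueue_request().
      */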
1458
1459 static void setup_request_data(struct ceph_osd_request *req,
1460                                struct ceph_msg *msg)
1461 {
1462         u32 data_len = 0;
1463         int i;
1464
1465         if (!list_empty(&msg->data))
1466                 return;
1467
1468         WARN_ON(msg->data_length);
1469         for (i = 0; i < req->r_num_ops; i++) {
1470                 struct ceph_osd_req_op *op = &req->r_ops[i];
1471
1472                 switch (op->op) {
1473                 /* request */
1474                 case CEPH_OSD_OP_WRITE:
1475                 case CEPH_OSD_OP_WRITEFULL:
1476                         WARN_ON(op->indata_len != op->extent.length);
1477                         ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1478                         break;
1479                 case CEPH_OSD_OP_SETXATTR:
1480                 case CEPH_OSD_OP_CMPXATTR:
1481                         WARN_ON(op->indata_len != op->xattr.name_len +
1482                                                   op->xattr.value_len);
1483                         ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1484                         break;
1485
1486                 /* reply */
1487                 case CEPH_OSD_OP_STAT:
1488                         ceph_osdc_msg_data_add(req->r_reply,
1489                                                &op->raw_data_in);
1490                         break;
1491                 case CEPH_OSD_OP_READ:
1492                         ceph_osdc_msg_data_add(req->r_reply,
1493                                                &op->extent.osd_data);
1494                         break;
1495
1496                 /* both */
1497                 case CEPH_OSD_OP_CALL:
1498                         WARN_ON(op->indata_len != op->cls.class_len +
1499                                                   op->cls.method_len +
1500                                                   op->cls.indata_len);
1501                         ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1502                         /* optional, can be NONE */
1503                         ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1504                         /* optional, can be NONE */
1505                         ceph_osdc_msg_data_add(req->r_reply,
1506                                                &op->cls.response_data);
1507                         break;
1508                 }
1509
1510                 data_len += op->indata_len;
1511         }
1512
1513         WARN_ON(data_len != msg->data_length);
1514 }
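
     /*
      * For reference, the op -> data item mapping established above:
      * WRITE/WRITEFULL and SETXATTR/CMPXATTR attach their payload to
      * the request message; STAT and READ attach a landing buffer to
      * r_reply; CALL attaches class/method names and input data to the
      * request and a response buffer to r_reply.
      */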
1515
1516 static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
1517 {
1518         void *p = msg->front.iov_base;
1519         void *const end = p + msg->front_alloc_len;
1520         u32 data_len = 0;
1521         int i;
1522
1523         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1524                 /* snapshots aren't writeable */
1525                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
1526         } else {
1527                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1528                         req->r_data_offset || req->r_snapc);
1529         }
1530
1531         setup_request_data(req, msg);
1532
1533         ceph_encode_32(&p, 1); /* client_inc, always 1 */
1534         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1535         ceph_encode_32(&p, req->r_flags);
1536         ceph_encode_timespec(p, &req->r_mtime);
1537         p += sizeof(struct ceph_timespec);
1538         /* aka reassert_version */
1539         memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
1540         p += sizeof(req->r_replay_version);
1541
1542         /* oloc */
1543         ceph_encode_8(&p, 4);
1544         ceph_encode_8(&p, 4);
1545         ceph_encode_32(&p, 8 + 4 + 4);
1546         ceph_encode_64(&p, req->r_t.target_oloc.pool);
1547         ceph_encode_32(&p, -1); /* preferred */
1548         ceph_encode_32(&p, 0); /* key len */
1549
1550         /* pgid */
1551         ceph_encode_8(&p, 1);
1552         ceph_encode_64(&p, req->r_t.pgid.pool);
1553         ceph_encode_32(&p, req->r_t.pgid.seed);
1554         ceph_encode_32(&p, -1); /* preferred */
1555
1556         /* oid */
1557         ceph_encode_32(&p, req->r_t.target_oid.name_len);
1558         memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1559         p += req->r_t.target_oid.name_len;
1560
1561         /* ops, can imply data */
1562         ceph_encode_16(&p, req->r_num_ops);
1563         for (i = 0; i < req->r_num_ops; i++) {
1564                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
1565                 p += sizeof(struct ceph_osd_op);
1566         }
1567
1568         ceph_encode_64(&p, req->r_snapid); /* snapid */
1569         if (req->r_snapc) {
1570                 ceph_encode_64(&p, req->r_snapc->seq);
1571                 ceph_encode_32(&p, req->r_snapc->num_snaps);
1572                 for (i = 0; i < req->r_snapc->num_snaps; i++)
1573                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
1574         } else {
1575                 ceph_encode_64(&p, 0); /* snap_seq */
1576                 ceph_encode_32(&p, 0); /* snaps len */
1577         }
1578
1579         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1580
1581         BUG_ON(p > end);
1582         msg->front.iov_len = p - msg->front.iov_base;
1583         msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
1584         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1585         msg->hdr.data_len = cpu_to_le32(data_len);
1586         /*
1587          * The header "data_off" is a hint to the receiver allowing it
1588          * to align received data into its buffers such that there's no
1589          * need to re-copy it before writing it to disk (direct I/O).
1590          */
1591         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1592
1593         dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
1594              req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
1595              req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
1596 }
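
     /*
      * Resulting MOSDOp v4 front section, as encoded above:
      *
      *    __le32 client_inc (always 1)
      *    __le32 osdmap epoch
      *    __le32 flags
      *    struct ceph_timespec mtime
      *    struct ceph_eversion reassert_version
      *    object_locator (v4: pool, preferred = -1, key len = 0)
      *    pgid (v1: pool, seed, preferred = -1)
      *    oid (length-prefixed name)
      *    __le16 num_ops, then num_ops * struct ceph_osd_op
      *    __le64 snapid
      *    __le64 snap_seq, __le32 num_snaps, then the snaps
      *    __le32 retry_attempt
      */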
1597
1598 /*
1599  * @req has to be assigned a tid and registered.
1600  */
1601 static void send_request(struct ceph_osd_request *req)
1602 {
1603         struct ceph_osd *osd = req->r_osd;
1604
1605         WARN_ON(osd->o_osd != req->r_t.osd);
1606
1607         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
1608         if (req->r_attempts)
1609                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1610         else
1611                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
1612
1613         encode_request(req, req->r_request);
1614
1615         dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
1616              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
1617              req->r_t.osd, req->r_flags, req->r_attempts);
1618
1619         req->r_t.paused = false;
1620         req->r_stamp = jiffies;
1621         req->r_attempts++;
1622
1623         req->r_sent = osd->o_incarnation;
1624         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
1625         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
1626 }
1627
1628 /*
1629  * Send any requests in the queue (req_unsent).
1630  */
1631 static void __send_queued(struct ceph_osd_client *osdc)
1632 {
1633         struct ceph_osd_request *req, *tmp;
1634
1635         dout("__send_queued\n");
1636         list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) {
1637                 list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
1638                 send_request(req);
1639         }
1640 }
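
     /*
      * Note that each request is moved to req_lru before it goes out
      * on the wire, so the keepalive scan in handle_timeout() below
      * sees it once it has been in flight longer than the keepalive
      * timeout.
      */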
1641
1642 /*
1643  * Caller should hold map_sem for read and request_mutex.
1644  */
1645 static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
1646                                      struct ceph_osd_request *req,
1647                                      bool nofail)
1648 {
1649         int rc;
1650
1651         __register_request(osdc, req);
1652         req->r_sent = 0;
1653         req->r_got_reply = 0;
1654         rc = __map_request(osdc, req, 0);
1655         if (rc < 0) {
1656                 if (nofail) {
1657                         dout("osdc_start_request failed map, "
1658                                 " will retry %lld\n", req->r_tid);
1659                         rc = 0;
1660                 } else {
1661                         __unregister_request(osdc, req);
1662                 }
1663                 return rc;
1664         }
1665
1666         if (req->r_osd == NULL) {
1667                 dout("send_request %p no up osds in pg\n", req);
1668                 ceph_monc_request_next_osdmap(&osdc->client->monc);
1669         } else {
1670                 __send_queued(osdc);
1671         }
1672
1673         return 0;
1674 }
1675
1676 static void __complete_request(struct ceph_osd_request *req)
1677 {
1678         if (req->r_callback)
1679                 req->r_callback(req);
1680         else
1681                 complete_all(&req->r_completion);
1682 }
1683
1684 /*
1685  * Timeout callback, called every N seconds.  When one or more OSD
1686  * requests have been active for more than N seconds, we send a keepalive
1687  * (tag + timestamp) to each affected OSD to ensure any communications
1688  * channel reset is detected.
1689  */
1690 static void handle_timeout(struct work_struct *work)
1691 {
1692         struct ceph_osd_client *osdc =
1693                 container_of(work, struct ceph_osd_client, timeout_work.work);
1694         struct ceph_options *opts = osdc->client->options;
1695         struct ceph_osd_request *req;
1696         struct ceph_osd *osd;
1697         struct list_head slow_osds;
1698         dout("timeout\n");
1699         down_read(&osdc->map_sem);
1700
1701         ceph_monc_request_next_osdmap(&osdc->client->monc);
1702
1703         mutex_lock(&osdc->request_mutex);
1704
1705         /*
1706          * ping osds that are a bit slow.  this ensures that if there
1707          * is a break in the TCP connection we will notice, and reopen
1708          * a connection with that osd (from the fault callback).
1709          */
1710         INIT_LIST_HEAD(&slow_osds);
1711         list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
1712                 if (time_before(jiffies,
1713                                 req->r_stamp + opts->osd_keepalive_timeout))
1714                         break;
1715
1716                 osd = req->r_osd;
1717                 BUG_ON(!osd);
1718                 dout(" tid %llu is slow, will send keepalive on osd%d\n",
1719                      req->r_tid, osd->o_osd);
1720                 list_move_tail(&osd->o_keepalive_item, &slow_osds);
1721         }
1722         while (!list_empty(&slow_osds)) {
1723                 osd = list_entry(slow_osds.next, struct ceph_osd,
1724                                  o_keepalive_item);
1725                 list_del_init(&osd->o_keepalive_item);
1726                 ceph_con_keepalive(&osd->o_con);
1727         }
1728
1729         __send_queued(osdc);
1730         mutex_unlock(&osdc->request_mutex);
1731         up_read(&osdc->map_sem);
1732
1733         schedule_delayed_work(&osdc->timeout_work,
1734                               osdc->client->options->osd_keepalive_timeout);
1735 }
1736
1737 static void handle_osds_timeout(struct work_struct *work)
1738 {
1739         struct ceph_osd_client *osdc =
1740                 container_of(work, struct ceph_osd_client,
1741                              osds_timeout_work.work);
1742         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
1743         struct ceph_osd *osd, *nosd;
1744
1745         dout("%s osdc %p\n", __func__, osdc);
1746         down_read(&osdc->map_sem);
1747         mutex_lock(&osdc->request_mutex);
1748
1749         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
1750                 if (time_before(jiffies, osd->lru_ttl))
1751                         break;
1752
1753                 remove_osd(osdc, osd);
1754         }
1755
1756         mutex_unlock(&osdc->request_mutex);
1757         up_read(&osdc->map_sem);
1758         schedule_delayed_work(&osdc->osds_timeout_work,
1759                               round_jiffies_relative(delay));
1760 }
1761
1762 static int ceph_oloc_decode(void **p, void *end,
1763                             struct ceph_object_locator *oloc)
1764 {
1765         u8 struct_v, struct_cv;
1766         u32 len;
1767         void *struct_end;
1768         int ret = 0;
1769
1770         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
1771         struct_v = ceph_decode_8(p);
1772         struct_cv = ceph_decode_8(p);
1773         if (struct_v < 3) {
1774                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
1775                         struct_v, struct_cv);
1776                 goto e_inval;
1777         }
1778         if (struct_cv > 6) {
1779                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
1780                         struct_v, struct_cv);
1781                 goto e_inval;
1782         }
1783         len = ceph_decode_32(p);
1784         ceph_decode_need(p, end, len, e_inval);
1785         struct_end = *p + len;
1786
1787         oloc->pool = ceph_decode_64(p);
1788         *p += 4; /* skip preferred */
1789
1790         len = ceph_decode_32(p);
1791         if (len > 0) {
1792                 pr_warn("ceph_object_locator::key is set\n");
1793                 goto e_inval;
1794         }
1795
1796         if (struct_v >= 5) {
1797                 len = ceph_decode_32(p);
1798                 if (len > 0) {
1799                         pr_warn("ceph_object_locator::nspace is set\n");
1800                         goto e_inval;
1801                 }
1802         }
1803
1804         if (struct_v >= 6) {
1805                 s64 hash = ceph_decode_64(p);
1806                 if (hash != -1) {
1807                         pr_warn("ceph_object_locator::hash is set\n");
1808                         goto e_inval;
1809                 }
1810         }
1811
1812         /* skip the rest */
1813         *p = struct_end;
1814 out:
1815         return ret;
1816
1817 e_inval:
1818         ret = -EINVAL;
1819         goto out;
1820 }
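
     /*
      * Wire format accepted above (ceph object_locator_t, v3..6):
      *
      *    u8 struct_v, u8 struct_cv, __le32 len
      *    __le64 pool
      *    __le32 preferred (skipped)
      *    __le32 key length (must be 0)
      *    [v5+] __le32 nspace length (must be 0)
      *    [v6+] __le64 hash (must be -1)
      */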
1821
1822 static int ceph_redirect_decode(void **p, void *end,
1823                                 struct ceph_request_redirect *redir)
1824 {
1825         u8 struct_v, struct_cv;
1826         u32 len;
1827         void *struct_end;
1828         int ret;
1829
1830         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
1831         struct_v = ceph_decode_8(p);
1832         struct_cv = ceph_decode_8(p);
1833         if (struct_cv > 1) {
1834                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
1835                         struct_v, struct_cv);
1836                 goto e_inval;
1837         }
1838         len = ceph_decode_32(p);
1839         ceph_decode_need(p, end, len, e_inval);
1840         struct_end = *p + len;
1841
1842         ret = ceph_oloc_decode(p, end, &redir->oloc);
1843         if (ret)
1844                 goto out;
1845
1846         len = ceph_decode_32(p);
1847         if (len > 0) {
1848                 pr_warn("ceph_request_redirect::object_name is set\n");
1849                 goto e_inval;
1850         }
1851
1852         len = ceph_decode_32(p);
1853         *p += len; /* skip osd_instructions */
1854
1855         /* skip the rest */
1856         *p = struct_end;
1857 out:
1858         return ret;
1859
1860 e_inval:
1861         ret = -EINVAL;
1862         goto out;
1863 }
1864
1865 struct MOSDOpReply {
1866         struct ceph_pg pgid;
1867         u64 flags;
1868         int result;
1869         u32 epoch;
1870         int num_ops;
1871         u32 outdata_len[CEPH_OSD_MAX_OPS];
1872         s32 rval[CEPH_OSD_MAX_OPS];
1873         int retry_attempt;
1874         struct ceph_eversion replay_version;
1875         u64 user_version;
1876         struct ceph_request_redirect redirect;
1877 };
1878
1879 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
1880 {
1881         void *p = msg->front.iov_base;
1882         void *const end = p + msg->front.iov_len;
1883         u16 version = le16_to_cpu(msg->hdr.version);
1884         struct ceph_eversion bad_replay_version;
1885         u8 decode_redir;
1886         u32 len;
1887         int ret;
1888         int i;
1889
1890         ceph_decode_32_safe(&p, end, len, e_inval);
1891         ceph_decode_need(&p, end, len, e_inval);
1892         p += len; /* skip oid */
1893
1894         ret = ceph_decode_pgid(&p, end, &m->pgid);
1895         if (ret)
1896                 return ret;
1897
1898         ceph_decode_64_safe(&p, end, m->flags, e_inval);
1899         ceph_decode_32_safe(&p, end, m->result, e_inval);
1900         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
1901         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
1902         p += sizeof(bad_replay_version);
1903         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
1904
1905         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
1906         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
1907                 goto e_inval;
1908
1909         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
1910                          e_inval);
1911         for (i = 0; i < m->num_ops; i++) {
1912                 struct ceph_osd_op *op = p;
1913
1914                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
1915                 p += sizeof(*op);
1916         }
1917
1918         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
1919         for (i = 0; i < m->num_ops; i++)
1920                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
1921
1922         if (version >= 5) {
1923                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
1924                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
1925                 p += sizeof(m->replay_version);
1926                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
1927         } else {
1928                 m->replay_version = bad_replay_version; /* struct */
1929                 m->user_version = le64_to_cpu(m->replay_version.version);
1930         }
1931
1932         if (version >= 6) {
1933                 if (version >= 7)
1934                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
1935                 else
1936                         decode_redir = 1;
1937         } else {
1938                 decode_redir = 0;
1939         }
1940
1941         if (decode_redir) {
1942                 ret = ceph_redirect_decode(&p, end, &m->redirect);
1943                 if (ret)
1944                         return ret;
1945         } else {
1946                 ceph_oloc_init(&m->redirect.oloc);
1947         }
1948
1949         return 0;
1950
1951 e_inval:
1952         return -EINVAL;
1953 }
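
     /*
      * Front section layout consumed above:
      *
      *    oid (length-prefixed, skipped)
      *    pgid
      *    __le64 flags, __le32 result
      *    struct ceph_eversion bad_replay_version, __le32 epoch
      *    __le32 num_ops, then num_ops * struct ceph_osd_op
      *    __le32 retry_attempt, then num_ops * __le32 rval
      *    [v5+] struct ceph_eversion replay_version, __le64 user_version
      *    [v7+] u8 decode_redir; [v6+] redirect (if decode_redir)
      */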
1954
1955 /*
1956  * We are done with @req if @m carries an error result, if @m is a
1957  * safe reply, or if @m is an unsafe reply and we didn't want a safe
1958  * one.
1959  */
1960 static bool done_request(const struct ceph_osd_request *req,
1961                          const struct MOSDOpReply *m)
1962 {
1963         return (m->result < 0 ||
1964                 (m->flags & CEPH_OSD_FLAG_ONDISK) ||
1965                 !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
1966 }
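
     /*
      * For example, a write sent with CEPH_OSD_FLAG_ONDISK is not done
      * on the initial ack - only the safe (ondisk) reply finishes it -
      * whereas a read, which never has ONDISK in r_flags, is done on
      * its first reply.  An error result finishes the request either
      * way.
      */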
1967
1968 /*
1969  * handle osd op reply.  either call the callback if it is specified,
1970  * or do the completion to wake up the waiting thread.
1971  *
1972  * ->r_unsafe_callback is set?  yes                     no
1973  *
1974  * first reply is OK (needed    r_cb/r_completion,      r_cb/r_completion,
1975  * any or needed/got safe)      r_safe_completion       r_safe_completion
1976  *
1977  * first reply is unsafe        r_unsafe_cb(true)       (nothing)
1978  *
1979  * when we get the safe reply   r_unsafe_cb(false),     r_cb/r_completion,
1980  *                              r_safe_completion       r_safe_completion
1981  */
1982 static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1983 {
1984         struct ceph_osd_request *req;
1985         struct MOSDOpReply m;
1986         u64 tid = le64_to_cpu(msg->hdr.tid);
1987         u32 data_len = 0;
1988         bool already_acked;
1989         int ret;
1990         int i;
1991
1992         dout("%s msg %p tid %llu\n", __func__, msg, tid);
1993
1994         down_read(&osdc->map_sem);
1995         mutex_lock(&osdc->request_mutex);
1996         req = lookup_request(&osdc->requests, tid);
1997         if (!req) {
1998                 dout("%s no tid %llu\n", __func__, tid);
1999                 goto out_unlock;
2000         }
2001         ceph_osdc_get_request(req);
2002
2003         ret = decode_MOSDOpReply(msg, &m);
2004         if (ret) {
2005                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2006                        req->r_tid, ret);
2007                 ceph_msg_dump(msg);
2008                 goto fail_request;
2009         }
2010         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2011              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2012              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2013              le64_to_cpu(m.replay_version.version), m.user_version);
2014
2015         if (m.retry_attempt >= 0) {
2016                 if (m.retry_attempt != req->r_attempts - 1) {
2017                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2018                              req, req->r_tid, m.retry_attempt,
2019                              req->r_attempts - 1);
2020                         goto out_put;
2021                 }
2022         } else {
2023                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2024         }
2025
2026         if (!ceph_oloc_empty(&m.redirect.oloc)) {
2027                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2028                      m.redirect.oloc.pool);
2029                 __unregister_request(osdc, req);
2030
2031                 ceph_oloc_copy(&req->r_t.target_oloc, &m.redirect.oloc);
2032
2033                 /*
2034                  * Start redirect requests with nofail=true.  If
2035                  * mapping fails, request will end up on the notarget
2036                  * list, waiting for the new osdmap (which can take
2037                  * a while), even though the original request mapped
2038                  * successfully.  In the future we might want to follow
2039                  * original request's nofail setting here.
2040                  */
2041                 ret = __ceph_osdc_start_request(osdc, req, true);
2042                 BUG_ON(ret);
2043
2044                 goto out_put;
2045         }
2046
2047         if (m.num_ops != req->r_num_ops) {
2048                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2049                        req->r_num_ops, req->r_tid);
2050                 goto fail_request;
2051         }
2052         for (i = 0; i < req->r_num_ops; i++) {
2053                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
2054                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
2055                 req->r_ops[i].rval = m.rval[i];
2056                 req->r_ops[i].outdata_len = m.outdata_len[i];
2057                 data_len += m.outdata_len[i];
2058         }
2059         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
2060                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
2061                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
2062                 goto fail_request;
2063         }
2064         dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
2065              req, req->r_tid, req->r_got_reply, m.result, data_len);
2066
2067         already_acked = req->r_got_reply;
2068         if (!already_acked) {
2069                 req->r_result = m.result ?: data_len;
2070                 req->r_replay_version = m.replay_version; /* struct */
2071                 req->r_got_reply = true;
2072         } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
2073                 dout("req %p tid %llu dup ack\n", req, req->r_tid);
2074                 goto out_put;
2075         }
2076
2077         if (done_request(req, &m)) {
2078                 __unregister_request(osdc, req);
2079                 if (req->r_linger) {
2080                         WARN_ON(req->r_unsafe_callback);
2081                         __register_linger_request(osdc, req);
2082                 }
2083         }
2084
2085         mutex_unlock(&osdc->request_mutex);
2086         up_read(&osdc->map_sem);
2087
2088         if (done_request(req, &m)) {
2089                 if (already_acked && req->r_unsafe_callback) {
2090                         dout("req %p tid %llu safe-cb\n", req, req->r_tid);
2091                         req->r_unsafe_callback(req, false);
2092                 } else {
2093                         dout("req %p tid %llu cb\n", req, req->r_tid);
2094                         __complete_request(req);
2095                 }
2096         } else {
2097                 if (req->r_unsafe_callback) {
2098                         dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
2099                         req->r_unsafe_callback(req, true);
2100                 } else {
2101                         WARN_ON(1);
2102                 }
2103         }
2104         if (m.flags & CEPH_OSD_FLAG_ONDISK)
2105                 complete_all(&req->r_safe_completion);
2106
2107         ceph_osdc_put_request(req);
2108         return;
2109
2110 fail_request:
2111         req->r_result = -EIO;
2112         __unregister_request(osdc, req);
2113         __complete_request(req);
2114         complete_all(&req->r_safe_completion);
2115 out_put:
2116         ceph_osdc_put_request(req);
2117 out_unlock:
2118         mutex_unlock(&osdc->request_mutex);
2119         up_read(&osdc->map_sem);
2120 }
2121
2122 static void reset_changed_osds(struct ceph_osd_client *osdc)
2123 {
2124         struct rb_node *p, *n;
2125
2126         dout("%s %p\n", __func__, osdc);
2127         for (p = rb_first(&osdc->osds); p; p = n) {
2128                 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
2129
2130                 n = rb_next(p);
2131                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
2132                     memcmp(&osd->o_con.peer_addr,
2133                            ceph_osd_addr(osdc->osdmap,
2134                                          osd->o_osd),
2135                            sizeof(struct ceph_entity_addr)) != 0)
2136                         __reset_osd(osdc, osd);
2137         }
2138 }
2139
2140 /*
2141  * Requeue requests whose mapping to an OSD has changed.  If requests map to
2142  * no osd, request a new map.
2143  *
2144  * Caller should hold map_sem for read.
2145  */
2146 static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
2147                           bool force_resend_writes)
2148 {
2149         struct ceph_osd_request *req, *nreq;
2150         struct rb_node *p;
2151         int needmap = 0;
2152         int err;
2153         bool force_resend_req;
2154
2155         dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
2156                 force_resend_writes ? " (force resend writes)" : "");
2157         mutex_lock(&osdc->request_mutex);
2158         for (p = rb_first(&osdc->requests); p; ) {
2159                 req = rb_entry(p, struct ceph_osd_request, r_node);
2160                 p = rb_next(p);
2161
2162                 /*
2163                  * For linger requests that have not yet been
2164                  * registered, move them to the linger list; they'll
2165                  * be sent to the osd in the loop below.  Unregister
2166                  * the request before re-registering it as a linger
2167                  * request to ensure the __map_request() below
2168                  * will decide it needs to be sent.
2169                  */
2170                 if (req->r_linger && list_empty(&req->r_linger_item)) {
2171                         dout("%p tid %llu restart on osd%d\n",
2172                              req, req->r_tid,
2173                              req->r_osd ? req->r_osd->o_osd : -1);
2174                         ceph_osdc_get_request(req);
2175                         __unregister_request(osdc, req);
2176                         __register_linger_request(osdc, req);
2177                         ceph_osdc_put_request(req);
2178                         continue;
2179                 }
2180
2181                 force_resend_req = force_resend ||
2182                         (force_resend_writes &&
2183                                 req->r_flags & CEPH_OSD_FLAG_WRITE);
2184                 err = __map_request(osdc, req, force_resend_req);
2185                 if (err < 0)
2186                         continue;  /* error */
2187                 if (req->r_osd == NULL) {
2188                         dout("%p tid %llu maps to no osd\n", req, req->r_tid);
2189                         needmap++;  /* request a newer map */
2190                 } else if (err > 0) {
2191                         if (!req->r_linger) {
2192                                 dout("%p tid %llu requeued on osd%d\n", req,
2193                                      req->r_tid,
2194                                      req->r_osd ? req->r_osd->o_osd : -1);
2195                                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2196                         }
2197                 }
2198         }
2199
2200         list_for_each_entry_safe(req, nreq, &osdc->req_linger,
2201                                  r_linger_item) {
2202                 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
2203
2204                 err = __map_request(osdc, req,
2205                                     force_resend || force_resend_writes);
2206                 dout("__map_request returned %d\n", err);
2207                 if (err < 0)
2208                         continue;  /* hrm! */
2209                 if (req->r_osd == NULL || err > 0) {
2210                         if (req->r_osd == NULL) {
2211                                 dout("lingering %p tid %llu maps to no osd\n",
2212                                      req, req->r_tid);
2213                                 /*
2214                                  * A homeless lingering request makes
2215                                  * no sense, as its job is to keep
2216                                  * a particular OSD connection open.
2217                                  * Request a newer map and kick the
2218                                  * request, knowing that it won't be
2219                                  * resent until we actually get a map
2220                                  * that can tell us where to send it.
2221                                  */
2222                                 needmap++;
2223                         }
2224
2225                         dout("kicking lingering %p tid %llu osd%d\n", req,
2226                              req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
2227                         __register_request(osdc, req);
2228                         __unregister_linger_request(osdc, req);
2229                 }
2230         }
2231         reset_changed_osds(osdc);
2232         mutex_unlock(&osdc->request_mutex);
2233
2234         if (needmap) {
2235                 dout("%d requests for down osds, need new map\n", needmap);
2236                 ceph_monc_request_next_osdmap(&osdc->client->monc);
2237         }
2238 }
2239
2240
2241 /*
2242  * Process updated osd map.
2243  *
2244  * The message contains any number of incremental and full maps, normally
2245  * indicating some sort of topology change in the cluster.  Kick requests
2246  * off to different OSDs as needed.
2247  */
2248 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
2249 {
2250         void *p, *end, *next;
2251         u32 nr_maps, maplen;
2252         u32 epoch;
2253         struct ceph_osdmap *newmap = NULL, *oldmap;
2254         int err;
2255         struct ceph_fsid fsid;
2256         bool was_full;
2257
2258         dout("handle_map have %u\n", osdc->osdmap->epoch);
2259         p = msg->front.iov_base;
2260         end = p + msg->front.iov_len;
2261
2262         /* verify fsid */
2263         ceph_decode_need(&p, end, sizeof(fsid), bad);
2264         ceph_decode_copy(&p, &fsid, sizeof(fsid));
2265         if (ceph_check_fsid(osdc->client, &fsid) < 0)
2266                 return;
2267
2268         down_write(&osdc->map_sem);
2269
2270         was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
2271
2272         /* incremental maps */
2273         ceph_decode_32_safe(&p, end, nr_maps, bad);
2274         dout(" %d inc maps\n", nr_maps);
2275         while (nr_maps > 0) {
2276                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2277                 epoch = ceph_decode_32(&p);
2278                 maplen = ceph_decode_32(&p);
2279                 ceph_decode_need(&p, end, maplen, bad);
2280                 next = p + maplen;
2281                 if (osdc->osdmap->epoch+1 == epoch) {
2282                         dout("applying incremental map %u len %d\n",
2283                              epoch, maplen);
2284                         newmap = osdmap_apply_incremental(&p, next,
2285                                                           osdc->osdmap);
2286                         if (IS_ERR(newmap)) {
2287                                 err = PTR_ERR(newmap);
2288                                 goto bad;
2289                         }
2290                         BUG_ON(!newmap);
2291                         if (newmap != osdc->osdmap) {
2292                                 ceph_osdmap_destroy(osdc->osdmap);
2293                                 osdc->osdmap = newmap;
2294                         }
2295                         was_full = was_full ||
2296                                 ceph_osdmap_flag(osdc->osdmap,
2297                                                  CEPH_OSDMAP_FULL);
2298                         kick_requests(osdc, false, was_full);
2299                 } else {
2300                         dout("ignoring incremental map %u len %d\n",
2301                              epoch, maplen);
2302                 }
2303                 p = next;
2304                 nr_maps--;
2305         }
2306         if (newmap)
2307                 goto done;
2308
2309         /* full maps */
2310         ceph_decode_32_safe(&p, end, nr_maps, bad);
2311         dout(" %d full maps\n", nr_maps);
2312         while (nr_maps) {
2313                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2314                 epoch = ceph_decode_32(&p);
2315                 maplen = ceph_decode_32(&p);
2316                 ceph_decode_need(&p, end, maplen, bad);
2317                 if (nr_maps > 1) {
2318                         dout("skipping non-latest full map %u len %d\n",
2319                              epoch, maplen);
2320                 } else if (osdc->osdmap->epoch >= epoch) {
2321                         dout("skipping full map %u len %d, "
2322                              "older than our %u\n", epoch, maplen,
2323                              osdc->osdmap->epoch);
2324                 } else {
2325                         bool skipped_map = false;
2326
2327                         dout("taking full map %u len %d\n", epoch, maplen);
2328                         newmap = ceph_osdmap_decode(&p, p+maplen);
2329                         if (IS_ERR(newmap)) {
2330                                 err = PTR_ERR(newmap);
2331                                 goto bad;
2332                         }
2333                         BUG_ON(!newmap);
2334                         oldmap = osdc->osdmap;
2335                         osdc->osdmap = newmap;
2336                         if (oldmap) {
2337                                 if (oldmap->epoch + 1 < newmap->epoch)
2338                                         skipped_map = true;
2339                                 ceph_osdmap_destroy(oldmap);
2340                         }
2341                         was_full = was_full ||
2342                                 ceph_osdmap_flag(osdc->osdmap,
2343                                                  CEPH_OSDMAP_FULL);
2344                         kick_requests(osdc, skipped_map, was_full);
2345                 }
2346                 p += maplen;
2347                 nr_maps--;
2348         }
2349
2350 done:
2351         downgrade_write(&osdc->map_sem);
2352         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2353                           osdc->osdmap->epoch);
2354
2355         /*
2356          * subscribe to subsequent osdmap updates if full to ensure
2357          * we find out when we are no longer full and stop returning
2358          * ENOSPC.
2359          */
2360         if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
2361                 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
2362                 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
2363                 ceph_monc_request_next_osdmap(&osdc->client->monc);
2364
2365         mutex_lock(&osdc->request_mutex);
2366         __send_queued(osdc);
2367         mutex_unlock(&osdc->request_mutex);
2368         up_read(&osdc->map_sem);
2369         wake_up_all(&osdc->client->auth_wq);
2370         return;
2371
2372 bad:
2373         pr_err("osdc handle_map corrupt msg\n");
2374         ceph_msg_dump(msg);
2375         up_write(&osdc->map_sem);
2376 }
2377
2378 /*
2379  * watch/notify callback event infrastructure
2380  *
2381  * These callbacks are used both for watch and notify operations.
2382  */
2383 static void __release_event(struct kref *kref)
2384 {
2385         struct ceph_osd_event *event =
2386                 container_of(kref, struct ceph_osd_event, kref);
2387
2388         dout("__release_event %p\n", event);
2389         kfree(event);
2390 }
2391
2392 static void get_event(struct ceph_osd_event *event)
2393 {
2394         kref_get(&event->kref);
2395 }
2396
2397 void ceph_osdc_put_event(struct ceph_osd_event *event)
2398 {
2399         kref_put(&event->kref, __release_event);
2400 }
2401 EXPORT_SYMBOL(ceph_osdc_put_event);
2402
2403 static void __insert_event(struct ceph_osd_client *osdc,
2404                              struct ceph_osd_event *new)
2405 {
2406         struct rb_node **p = &osdc->event_tree.rb_node;
2407         struct rb_node *parent = NULL;
2408         struct ceph_osd_event *event = NULL;
2409
2410         while (*p) {
2411                 parent = *p;
2412                 event = rb_entry(parent, struct ceph_osd_event, node);
2413                 if (new->cookie < event->cookie)
2414                         p = &(*p)->rb_left;
2415                 else if (new->cookie > event->cookie)
2416                         p = &(*p)->rb_right;
2417                 else
2418                         BUG();
2419         }
2420
2421         rb_link_node(&new->node, parent, p);
2422         rb_insert_color(&new->node, &osdc->event_tree);
2423 }
2424
2425 static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
2426                                                 u64 cookie)
2427 {
2428         struct rb_node **p = &osdc->event_tree.rb_node;
2429         struct rb_node *parent = NULL;
2430         struct ceph_osd_event *event = NULL;
2431
2432         while (*p) {
2433                 parent = *p;
2434                 event = rb_entry(parent, struct ceph_osd_event, node);
2435                 if (cookie < event->cookie)
2436                         p = &(*p)->rb_left;
2437                 else if (cookie > event->cookie)
2438                         p = &(*p)->rb_right;
2439                 else
2440                         return event;
2441         }
2442         return NULL;
2443 }
2444
2445 static void __remove_event(struct ceph_osd_event *event)
2446 {
2447         struct ceph_osd_client *osdc = event->osdc;
2448
2449         if (!RB_EMPTY_NODE(&event->node)) {
2450                 dout("__remove_event removed %p\n", event);
2451                 rb_erase(&event->node, &osdc->event_tree);
2452                 ceph_osdc_put_event(event);
2453         } else {
2454                 dout("__remove_event didn't remove %p\n", event);
2455         }
2456 }
2457
2458 int ceph_osdc_create_event(struct ceph_osd_client *osdc,
2459                            void (*event_cb)(u64, u64, u8, void *),
2460                            void *data, struct ceph_osd_event **pevent)
2461 {
2462         struct ceph_osd_event *event;
2463
2464         event = kmalloc(sizeof(*event), GFP_NOIO);
2465         if (!event)
2466                 return -ENOMEM;
2467
2468         dout("create_event %p\n", event);
2469         event->cb = event_cb;
2470         event->one_shot = 0;
2471         event->data = data;
2472         event->osdc = osdc;
2473         INIT_LIST_HEAD(&event->osd_node);
2474         RB_CLEAR_NODE(&event->node);
2475         kref_init(&event->kref);   /* one ref for us */
2476         kref_get(&event->kref);    /* one ref for the caller */
2477
2478         spin_lock(&osdc->event_lock);
2479         event->cookie = ++osdc->event_count;
2480         __insert_event(osdc, event);
2481         spin_unlock(&osdc->event_lock);
2482
2483         *pevent = event;
2484         return 0;
2485 }
2486 EXPORT_SYMBOL(ceph_osdc_create_event);
2487
2488 void ceph_osdc_cancel_event(struct ceph_osd_event *event)
2489 {
2490         struct ceph_osd_client *osdc = event->osdc;
2491
2492         dout("cancel_event %p\n", event);
2493         spin_lock(&osdc->event_lock);
2494         __remove_event(event);
2495         spin_unlock(&osdc->event_lock);
2496         ceph_osdc_put_event(event); /* caller's */
2497 }
2498 EXPORT_SYMBOL(ceph_osdc_cancel_event);
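
     /*
      * A minimal lifecycle sketch (my_cb/my_data are placeholders; the
      * watch osd op that carries event->cookie to the OSD is set up by
      * the caller, e.g. rbd, and is not shown here):
      *
      *    struct ceph_osd_event *event;
      *    int ret;
      *
      *    ret = ceph_osdc_create_event(osdc, my_cb, my_data, &event);
      *    if (ret)
      *            return ret;
      *    ... send a watch op referencing event->cookie ...
      *    ... my_cb() runs from notify_wq on each notification ...
      *    ceph_osdc_cancel_event(event);  (drops the caller's ref)
      */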
2499
2500
2501 static void do_event_work(struct work_struct *work)
2502 {
2503         struct ceph_osd_event_work *event_work =
2504                 container_of(work, struct ceph_osd_event_work, work);
2505         struct ceph_osd_event *event = event_work->event;
2506         u64 ver = event_work->ver;
2507         u64 notify_id = event_work->notify_id;
2508         u8 opcode = event_work->opcode;
2509
2510         dout("do_event_work completing %p\n", event);
2511         event->cb(ver, notify_id, opcode, event->data);
2512         dout("do_event_work completed %p\n", event);
2513         ceph_osdc_put_event(event);
2514         kfree(event_work);
2515 }
2516
2517
2518 /*
2519  * Process osd watch notifications
2520  */
2521 static void handle_watch_notify(struct ceph_osd_client *osdc,
2522                                 struct ceph_msg *msg)
2523 {
2524         void *p, *end;
2525         u8 proto_ver;
2526         u64 cookie, ver, notify_id;
2527         u8 opcode;
2528         struct ceph_osd_event *event;
2529         struct ceph_osd_event_work *event_work;
2530
2531         p = msg->front.iov_base;
2532         end = p + msg->front.iov_len;
2533
2534         ceph_decode_8_safe(&p, end, proto_ver, bad);
2535         ceph_decode_8_safe(&p, end, opcode, bad);
2536         ceph_decode_64_safe(&p, end, cookie, bad);
2537         ceph_decode_64_safe(&p, end, ver, bad);
2538         ceph_decode_64_safe(&p, end, notify_id, bad);
2539
2540         spin_lock(&osdc->event_lock);
2541         event = __find_event(osdc, cookie);
2542         if (event) {
2543                 BUG_ON(event->one_shot);
2544                 get_event(event);
2545         }
2546         spin_unlock(&osdc->event_lock);
2547         dout("handle_watch_notify cookie %lld ver %lld event %p\n",
2548              cookie, ver, event);
2549         if (event) {
2550                 event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
2551                 if (!event_work) {
2552                         pr_err("couldn't allocate event_work\n");
2553                         ceph_osdc_put_event(event);
2554                         return;
2555                 }
2556                 INIT_WORK(&event_work->work, do_event_work);
2557                 event_work->event = event;
2558                 event_work->ver = ver;
2559                 event_work->notify_id = notify_id;
2560                 event_work->opcode = opcode;
2561
2562                 queue_work(osdc->notify_wq, &event_work->work);
2563         }
2564
2565         return;
2566
2567 bad:
2568         pr_err("osdc handle_watch_notify corrupt msg\n");
2569 }
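
     /*
      * Note that the event callback does not run here: it is bounced
      * to osdc->notify_wq via do_event_work(), so callbacks may block
      * without stalling the messenger, and ceph_osdc_flush_notifies()
      * can drain them by flushing that workqueue.
      */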
2570
2571 /*
2572  * Register request, send initial attempt.
2573  */
2574 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
2575                             struct ceph_osd_request *req,
2576                             bool nofail)
2577 {
2578         int rc;
2579
2580         down_read(&osdc->map_sem);
2581         mutex_lock(&osdc->request_mutex);
2582
2583         rc = __ceph_osdc_start_request(osdc, req, nofail);
2584
2585         mutex_unlock(&osdc->request_mutex);
2586         up_read(&osdc->map_sem);
2587
2588         return rc;
2589 }
2590 EXPORT_SYMBOL(ceph_osdc_start_request);
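
     /*
      * Typical submission pattern - a sketch only; see
      * ceph_osdc_readpages() below for a complete in-tree example:
      *
      *    req = ceph_osdc_new_request(osdc, ...);
      *    if (IS_ERR(req))
      *            return PTR_ERR(req);
      *    ret = ceph_osdc_start_request(osdc, req, false);
      *    if (!ret)
      *            ret = ceph_osdc_wait_request(osdc, req);
      *    ceph_osdc_put_request(req);
      */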
2591
2592 /*
2593  * Unregister a registered request.  The request is not completed (i.e.
2594  * no callbacks or wakeups) - higher layers are supposed to know what
2595  * they are canceling.
2596  */
2597 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
2598 {
2599         struct ceph_osd_client *osdc = req->r_osdc;
2600
2601         mutex_lock(&osdc->request_mutex);
2602         if (req->r_linger)
2603                 __unregister_linger_request(osdc, req);
2604         __unregister_request(osdc, req);
2605         mutex_unlock(&osdc->request_mutex);
2606
2607         dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid);
2608 }
2609 EXPORT_SYMBOL(ceph_osdc_cancel_request);
2610
2611 /*
2612  * wait for a request to complete
2613  */
2614 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
2615                            struct ceph_osd_request *req)
2616 {
2617         int rc;
2618
2619         dout("%s %p tid %llu\n", __func__, req, req->r_tid);
2620
2621         rc = wait_for_completion_interruptible(&req->r_completion);
2622         if (rc < 0) {
2623                 dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid);
2624                 ceph_osdc_cancel_request(req);
2625
2626                 /* kludge - need to wake ceph_osdc_sync() */
2627                 complete_all(&req->r_safe_completion);
2628                 return rc;
2629         }
2630
2631         dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid,
2632              req->r_result);
2633         return req->r_result;
2634 }
2635 EXPORT_SYMBOL(ceph_osdc_wait_request);
2636
2637 /*
2638  * sync - wait for all in-flight requests to flush.  avoid starvation.
2639  */
2640 void ceph_osdc_sync(struct ceph_osd_client *osdc)
2641 {
2642         struct ceph_osd_request *req;
2643         u64 last_tid, next_tid = 0;
2644
2645         mutex_lock(&osdc->request_mutex);
2646         last_tid = osdc->last_tid;
2647         while (1) {
2648                 req = __lookup_request_ge(osdc, next_tid);
2649                 if (!req)
2650                         break;
2651                 if (req->r_tid > last_tid)
2652                         break;
2653
2654                 next_tid = req->r_tid + 1;
2655                 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
2656                         continue;
2657
2658                 ceph_osdc_get_request(req);
2659                 mutex_unlock(&osdc->request_mutex);
2660                 dout("sync waiting on tid %llu (last is %llu)\n",
2661                      req->r_tid, last_tid);
2662                 wait_for_completion(&req->r_safe_completion);
2663                 mutex_lock(&osdc->request_mutex);
2664                 ceph_osdc_put_request(req);
2665         }
2666         mutex_unlock(&osdc->request_mutex);
2667         dout("sync done (thru tid %llu)\n", last_tid);
2668 }
2669 EXPORT_SYMBOL(ceph_osdc_sync);
2670
2671 /*
2672  * Call all pending notify callbacks - for use after a watch is
2673  * unregistered, to make sure no more callbacks for it will be invoked
2674  */
2675 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
2676 {
2677         flush_workqueue(osdc->notify_wq);
2678 }
2679 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
2680
2681
2682 /*
2683  * init, shutdown
2684  */
2685 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
2686 {
2687         int err;
2688
2689         dout("init\n");
2690         osdc->client = client;
2691         init_rwsem(&osdc->map_sem);
2692         mutex_init(&osdc->request_mutex);
2693         osdc->last_tid = 0;
2694         osdc->osds = RB_ROOT;
2695         INIT_LIST_HEAD(&osdc->osd_lru);
2696         osdc->requests = RB_ROOT;
2697         INIT_LIST_HEAD(&osdc->req_lru);
2698         INIT_LIST_HEAD(&osdc->req_unsent);
2699         INIT_LIST_HEAD(&osdc->req_notarget);
2700         INIT_LIST_HEAD(&osdc->req_linger);
2701         osdc->num_requests = 0;
2702         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
2703         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
2704         spin_lock_init(&osdc->event_lock);
2705         osdc->event_tree = RB_ROOT;
2706         osdc->event_count = 0;
2707
2708         err = -ENOMEM;
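             /*
              * Allocate a blank osdmap up front so that osdc->osdmap is
              * always valid - ceph_osdc_handle_map() above, for one,
              * dereferences it without a NULL check.
              */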
2709         osdc->osdmap = ceph_osdmap_alloc();
2710         if (!osdc->osdmap)
2711                 goto out;
2712
2713         osdc->req_mempool = mempool_create_slab_pool(10,
2714                                                      ceph_osd_request_cache);
2715         if (!osdc->req_mempool)
2716                 goto out_map;
2717
2718         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
2719                                 PAGE_SIZE, 10, true, "osd_op");
2720         if (err < 0)
2721                 goto out_mempool;
2722         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
2723                                 PAGE_SIZE, 10, true, "osd_op_reply");
2724         if (err < 0)
2725                 goto out_msgpool;
2726
2727         err = -ENOMEM;
2728         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
2729         if (!osdc->notify_wq)
2730                 goto out_msgpool_reply;
2731
2732         schedule_delayed_work(&osdc->timeout_work,
2733                               osdc->client->options->osd_keepalive_timeout);
2734         schedule_delayed_work(&osdc->osds_timeout_work,
2735             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
2736
2737         return 0;
2738
2739 out_msgpool_reply:
2740         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
2741 out_msgpool:
2742         ceph_msgpool_destroy(&osdc->msgpool_op);
2743 out_mempool:
2744         mempool_destroy(osdc->req_mempool);
2745 out_map:
2746         ceph_osdmap_destroy(osdc->osdmap);
2747 out:
2748         return err;
2749 }
2750
2751 void ceph_osdc_stop(struct ceph_osd_client *osdc)
2752 {
2753         flush_workqueue(osdc->notify_wq);
2754         destroy_workqueue(osdc->notify_wq);
2755         cancel_delayed_work_sync(&osdc->timeout_work);
2756         cancel_delayed_work_sync(&osdc->osds_timeout_work);
2757
2758         mutex_lock(&osdc->request_mutex);
2759         while (!RB_EMPTY_ROOT(&osdc->osds)) {
2760                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
2761                                                 struct ceph_osd, o_node);
2762                 remove_osd(osdc, osd);
2763         }
2764         mutex_unlock(&osdc->request_mutex);
2765
2766         ceph_osdmap_destroy(osdc->osdmap);
2767         mempool_destroy(osdc->req_mempool);
2768         ceph_msgpool_destroy(&osdc->msgpool_op);
2769         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
2770 }
2771
2772 /*
2773  * Read some contiguous pages.  If we cross a stripe boundary, shorten
2774  * *plen.  Return number of bytes read, or error.
2775  */
2776 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
2777                         struct ceph_vino vino, struct ceph_file_layout *layout,
2778                         u64 off, u64 *plen,
2779                         u32 truncate_seq, u64 truncate_size,
2780                         struct page **pages, int num_pages, int page_align)
2781 {
2782         struct ceph_osd_request *req;
2783         int rc = 0;
2784
2785         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
2786              vino.snap, off, *plen);
2787         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
2788                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
2789                                     NULL, truncate_seq, truncate_size,
2790                                     false);
2791         if (IS_ERR(req))
2792                 return PTR_ERR(req);
2793
2794         /* it may be a short read due to an object boundary */
2795         osd_req_op_extent_osd_data_pages(req, 0,
2796                                 pages, *plen, page_align, false, false);
2797
2798         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
2799              off, *plen, *plen, page_align);
2800
2801         rc = ceph_osdc_start_request(osdc, req, false);
2802         if (!rc)
2803                 rc = ceph_osdc_wait_request(osdc, req);
2804
2805         ceph_osdc_put_request(req);
2806         dout("readpages result %d\n", rc);
2807         return rc;
2808 }
2809 EXPORT_SYMBOL(ceph_osdc_readpages);
2810
2811 /*
2812  * do a synchronous write on N pages
2813  */
2814 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
2815                          struct ceph_file_layout *layout,
2816                          struct ceph_snap_context *snapc,
2817                          u64 off, u64 len,
2818                          u32 truncate_seq, u64 truncate_size,
2819                          struct timespec *mtime,
2820                          struct page **pages, int num_pages)
2821 {
2822         struct ceph_osd_request *req;
2823         int rc = 0;
2824         int page_align = off & ~PAGE_MASK;
2825
2826         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
2827                                     CEPH_OSD_OP_WRITE,
2828                                     CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
2829                                     snapc, truncate_seq, truncate_size,
2830                                     true);
2831         if (IS_ERR(req))
2832                 return PTR_ERR(req);
2833
2834         /* it may be a short write due to an object boundary */
2835         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
2836                                 false, false);
2837         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
2838
2839         req->r_mtime = *mtime;
2840         rc = ceph_osdc_start_request(osdc, req, true);
2841         if (!rc)
2842                 rc = ceph_osdc_wait_request(osdc, req);
2843
2844         ceph_osdc_put_request(req);
2845         if (rc == 0)
2846                 rc = len;
2847         dout("writepages result %d\n", rc);
2848         return rc;
2849 }
2850 EXPORT_SYMBOL(ceph_osdc_writepages);
2851
2852 int ceph_osdc_setup(void)
2853 {
2854         size_t size = sizeof(struct ceph_osd_request) +
2855             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
2856
2857         BUG_ON(ceph_osd_request_cache);
2858         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
2859                                                    0, 0, NULL);
2860
2861         return ceph_osd_request_cache ? 0 : -ENOMEM;
2862 }
2863 EXPORT_SYMBOL(ceph_osdc_setup);
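
     /*
      * ceph_osdc_setup() must run before the first ceph_osdc_init()
      * call, which carves its request mempool out of
      * ceph_osd_request_cache; it is paired with ceph_osdc_cleanup()
      * at module exit.
      */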
2864
2865 void ceph_osdc_cleanup(void)
2866 {
2867         BUG_ON(!ceph_osd_request_cache);
2868         kmem_cache_destroy(ceph_osd_request_cache);
2869         ceph_osd_request_cache = NULL;
2870 }
2871 EXPORT_SYMBOL(ceph_osdc_cleanup);
2872
2873 /*
2874  * handle incoming message
2875  */
2876 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2877 {
2878         struct ceph_osd *osd = con->private;
2879         struct ceph_osd_client *osdc;
2880         int type = le16_to_cpu(msg->hdr.type);
2881
2882         if (!osd)
2883                 goto out;
2884         osdc = osd->o_osdc;
2885
2886         switch (type) {
2887         case CEPH_MSG_OSD_MAP:
2888                 ceph_osdc_handle_map(osdc, msg);
2889                 break;
2890         case CEPH_MSG_OSD_OPREPLY:
2891                 handle_reply(osdc, msg);
2892                 break;
2893         case CEPH_MSG_WATCH_NOTIFY:
2894                 handle_watch_notify(osdc, msg);
2895                 break;
2896
2897         default:
2898                 pr_err("received unknown message type %d %s\n", type,
2899                        ceph_msg_type_name(type));
2900         }
2901 out:
2902         ceph_msg_put(msg);
2903 }
2904
2905 /*
2906  * Look up and return the preallocated reply message for an incoming
2907  * OPREPLY.  If the data portion is larger than what was preallocated,
2908  * don't try to cope with that for now - just skip the message.
2909  */
2910 static struct ceph_msg *get_reply(struct ceph_connection *con,
2911                                   struct ceph_msg_header *hdr,
2912                                   int *skip)
2913 {
2914         struct ceph_osd *osd = con->private;
2915         struct ceph_osd_client *osdc = osd->o_osdc;
2916         struct ceph_msg *m;
2917         struct ceph_osd_request *req;
2918         int front_len = le32_to_cpu(hdr->front_len);
2919         int data_len = le32_to_cpu(hdr->data_len);
2920         u64 tid;
2921
2922         tid = le64_to_cpu(hdr->tid);
2923         mutex_lock(&osdc->request_mutex);
2924         req = lookup_request(&osdc->requests, tid);
2925         if (!req) {
2926                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
2927                      osd->o_osd, tid);
2928                 m = NULL;
2929                 *skip = 1;
2930                 goto out;
2931         }
2932
2933         ceph_msg_revoke_incoming(req->r_reply);
2934
2935         if (front_len > req->r_reply->front_alloc_len) {
2936                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
2937                         __func__, osd->o_osd, req->r_tid, front_len,
2938                         req->r_reply->front_alloc_len);
2939                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
2940                                  false);
2941                 if (!m)
2942                         goto out;
2943                 ceph_msg_put(req->r_reply);
2944                 req->r_reply = m;
2945         }
2946
2947         if (data_len > req->r_reply->data_length) {
2948                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
2949                         __func__, osd->o_osd, req->r_tid, data_len,
2950                         req->r_reply->data_length);
2951                 m = NULL;
2952                 *skip = 1;
2953                 goto out;
2954         }
2955
2956         m = ceph_msg_get(req->r_reply);
2957         dout("get_reply tid %llu %p\n", tid, m);
2958
2959 out:
2960         mutex_unlock(&osdc->request_mutex);
2961         return m;
2962 }
2963
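/*
 * Allocate (or look up) a message to receive the incoming frame into.
 * Map and watch-notify messages get a fresh allocation; op replies are
 * matched to the preallocated reply of the pending request via
 * get_reply().
 */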
2964 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
2965                                   struct ceph_msg_header *hdr,
2966                                   int *skip)
2967 {
2968         struct ceph_osd *osd = con->private;
2969         int type = le16_to_cpu(hdr->type);
2970         int front = le32_to_cpu(hdr->front_len);
2971
2972         *skip = 0;
2973         switch (type) {
2974         case CEPH_MSG_OSD_MAP:
2975         case CEPH_MSG_WATCH_NOTIFY:
2976                 return ceph_msg_new(type, front, GFP_NOFS, false);
2977         case CEPH_MSG_OSD_OPREPLY:
2978                 return get_reply(con, hdr, skip);
2979         default:
2980                 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
2981                         osd->o_osd);
2982                 *skip = 1;
2983                 return NULL;
2984         }
2985 }
2986
2987 /*
2988  * Wrappers to take/drop a reference on the ceph_osd containing the connection
2989  */
2990 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
2991 {
2992         struct ceph_osd *osd = con->private;
2993         if (get_osd(osd))
2994                 return con;
2995         return NULL;
2996 }
2997
2998 static void put_osd_con(struct ceph_connection *con)
2999 {
3000         struct ceph_osd *osd = con->private;
3001         put_osd(osd);
3002 }
3003
3004 /*
3005  * authentication
3006  */
3007 /*
3008  * Note: returned pointer is the address of a structure that's
3009  * managed separately.  Caller must *not* attempt to free it.
3010  */
3011 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3012                                         int *proto, int force_new)
3013 {
3014         struct ceph_osd *o = con->private;
3015         struct ceph_osd_client *osdc = o->o_osdc;
3016         struct ceph_auth_client *ac = osdc->client->monc.auth;
3017         struct ceph_auth_handshake *auth = &o->o_auth;
3018
3019         if (force_new && auth->authorizer) {
3020                 ceph_auth_destroy_authorizer(auth->authorizer);
3021                 auth->authorizer = NULL;
3022         }
3023         if (!auth->authorizer) {
3024                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
3025                                                       auth);
3026                 if (ret)
3027                         return ERR_PTR(ret);
3028         } else {
3029                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
3030                                                      auth);
3031                 if (ret)
3032                         return ERR_PTR(ret);
3033         }
3034         *proto = ac->protocol;
3035
3036         return auth;
3037 }
3038
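/*
 * Check the authorizer reply the OSD sent back as part of the
 * connection handshake.
 */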
3040 static int verify_authorizer_reply(struct ceph_connection *con, int len)
3041 {
3042         struct ceph_osd *o = con->private;
3043         struct ceph_osd_client *osdc = o->o_osdc;
3044         struct ceph_auth_client *ac = osdc->client->monc.auth;
3045
3046         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
3047 }
3048
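/*
 * Our authorizer was rejected: invalidate our OSD ticket so a fresh one
 * is obtained from the monitor before the connection is retried.
 */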
3049 static int invalidate_authorizer(struct ceph_connection *con)
3050 {
3051         struct ceph_osd *o = con->private;
3052         struct ceph_osd_client *osdc = o->o_osdc;
3053         struct ceph_auth_client *ac = osdc->client->monc.auth;
3054
3055         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
3056         return ceph_monc_validate_auth(&osdc->client->monc);
3057 }
3058
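/* Sign an outgoing message using this session's auth handshake. */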
3059 static int osd_sign_message(struct ceph_msg *msg)
3060 {
3061         struct ceph_osd *o = msg->con->private;
3062         struct ceph_auth_handshake *auth = &o->o_auth;
3063
3064         return ceph_auth_sign_message(auth, msg);
3065 }
3066
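/* Verify the signature on an incoming message. */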
3067 static int osd_check_message_signature(struct ceph_msg *msg)
3068 {
3069         struct ceph_osd *o = msg->con->private;
3070         struct ceph_auth_handshake *auth = &o->o_auth;
3071
3072         return ceph_auth_check_message_signature(auth, msg);
3073 }
3074
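/*
 * Messenger callbacks for OSD connections; these hook the generic
 * ceph_connection machinery up to the handlers above.
 */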
3075 static const struct ceph_connection_operations osd_con_ops = {
3076         .get = get_osd_con,
3077         .put = put_osd_con,
3078         .dispatch = dispatch,
3079         .get_authorizer = get_authorizer,
3080         .verify_authorizer_reply = verify_authorizer_reply,
3081         .invalidate_authorizer = invalidate_authorizer,
3082         .alloc_msg = alloc_msg,
3083         .sign_message = osd_sign_message,
3084         .check_message_signature = osd_check_message_signature,
3085         .fault = osd_reset,
3086 };