libceph: introduce ceph_spg, ceph_pg_to_primary_shard()
net/ceph/osd_client.c (linux-block.git)

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        /* object extent? */
        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}
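
/*
 * Worked example (illustrative numbers): with a simple layout where
 * stripe_unit == object_size == 4M and stripe_count == 1, a file
 * extent 6M~4M maps to objnum=1, objoff=2M, objlen=2M, and *plen is
 * shortened from 4M to 2M so the request ends at the object boundary.
 */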

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                        struct bio *bio, size_t bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio = bio;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})
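
/*
 * For example, osd_req_op_data(req, 0, extent, osd_data) evaluates to
 * &req->r_ops[0].extent.osd_data, after bounds-checking 0 against
 * req->r_num_ops.
 */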

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        unsigned int which, struct bio *bio, size_t bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
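
/*
 * Usage sketch (hypothetical caller, locals assumed): initialize the
 * extent op first, then attach the data it should read into or write
 * from, e.g.:
 *
 *      osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *      osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
 *                                       false);
 */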

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_unsafe_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
                              gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

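/*
 * Encoded object locator size: pool (8) + preferred (4) + key length
 * (4) + namespace length (4) + the namespace string itself, matching
 * the oloc encoding in encode_request() below.
 */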
static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
        msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 1 + 8 + 4 + 4; /* pgid */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4; /* retry_attempt */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
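
/*
 * Typical call sequence (sketch; pool_id and object_name are assumed
 * locals, error handling elided).  The base oid and oloc must be set
 * before the messages are sized, which is what the WARN_ONs above
 * enforce:
 *
 *      req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *      req->r_base_oloc.pool = pool_id;
 *      ceph_oid_printf(&req->r_base_oid, "%s", object_name);
 *      ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 */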

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
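
/*
 * For example, a 4096-byte CEPH_OSD_OP_WRITE ends up with
 * op->indata_len == 4096, while a read of the same extent leaves
 * indata_len at 0: only writes carry outgoing payload.
 */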

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
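
/*
 * Worked example: if r_ops[which] is a write of 0~8192 and offset_inc
 * is 4096, the duplicated op becomes a write of 4096~4096 with
 * indata_len reduced by the same 4096, i.e. the tail of the original
 * extent.
 */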

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);
        ceph_pagelist_init(pagelist);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
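
/*
 * Usage sketch (class and method names illustrative): set up a CALL op
 * and attach its input payload; the request_data helpers above grow
 * cls.indata_len to match:
 *
 *      osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "lock");
 *      osd_req_op_cls_request_data_pagelist(req, 0, pagelist);
 */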

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        ceph_pagelist_init(pagelist);

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}
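
/*
 * Note that the value returned above is the op's contribution to the
 * outgoing data payload; encode_request() accumulates it into the
 * message's expected data length.
 */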

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_abort_on_full = true;
        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
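
/*
 * End-to-end sketch of a file read through this helper (hypothetical
 * locals, error handling elided; ceph_osdc_start_request() and
 * ceph_osdc_wait_request() are the submission/wait helpers defined
 * later in this file):
 *
 *      u64 len = count;
 *
 *      req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *                                  CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *                                  NULL, truncate_seq, truncate_size,
 *                                  false);
 *      osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
 *                                       false);
 *      ceph_osdc_start_request(osdc, req, false);
 *      ret = ceph_osdc_wait_request(osdc, req);
 */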

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
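
/*
 * DEFINE_RB_FUNCS generates the insert_request()/lookup_request()/
 * erase_request() helpers (and their _mc counterparts for the
 * map-check tree) used throughout this file.
 */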

static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        refcount_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (refcount_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
                     refcount_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
             refcount_read(&osd->o_ref) - 1);
        if (refcount_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
                        struct ceph_osd_request *req =
                            rb_entry(n, struct ceph_osd_request, r_node);
                        req->r_stamp = jiffies;
                }

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
                                          bool wrlocked)
{
        struct ceph_osd *osd;

        if (wrlocked)
                verify_osdc_wrlocked(osdc);
        else
                verify_osdc_locked(osdc);

        if (o != CEPH_HOMELESS_OSD)
                osd = lookup_osd(&osdc->osds, o);
        else
                osd = &osdc->homeless_osd;
        if (!osd) {
                if (!wrlocked)
                        return ERR_PTR(-EAGAIN);

                osd = create_osd(osdc, o);
                insert_osd(&osdc->osds, osd);
                ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
                              &osdc->osdmap->osd_addr[osd->o_osd]);
        }

        dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
        return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(!req->r_tid || req->r_osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        if (!osd_homeless(osd))
                __remove_osd_from_lru(osd);
        else
                atomic_inc(&osd->o_osdc->num_homeless);

        get_osd(osd);
        insert_request(&osd->o_requests, req);
        req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(req->r_osd != osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        req->r_osd = NULL;
        erase_request(&osd->o_requests, req);
        put_osd(osd);

        if (!osd_homeless(osd))
                maybe_move_osd_to_lru(osd);
        else
                atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
        return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
        struct rb_node *n;

        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pi =
                    rb_entry(n, struct ceph_pg_pool_info, node);

                if (__pool_full(pi))
                        return true;
        }

        return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
        struct ceph_pg_pool_info *pi;

        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
        if (!pi)
                return false;

        return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
{
        bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);

        WARN_ON(pi->id != t->base_oloc.pool);
        return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
               ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
               (osdc->osdmap->epoch < osdc->epoch_barrier);
}

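/*
 * CALC_TARGET_NO_ACTION: the mapping is unchanged, nothing to do.
 * CALC_TARGET_NEED_RESEND: the target moved and the request must be
 * (re)sent to the new OSD.
 * CALC_TARGET_POOL_DNE: the pool doesn't exist in the current osdmap.
 */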
enum calc_target_result {
        CALC_TARGET_NO_ACTION = 0,
        CALC_TARGET_NEED_RESEND,
        CALC_TARGET_POOL_DNE,
};
1314
1315 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1316                                            struct ceph_osd_request_target *t,
1317                                            bool any_change)
1318 {
1319         struct ceph_pg_pool_info *pi;
1320         struct ceph_pg pgid, last_pgid;
1321         struct ceph_osds up, acting;
1322         bool force_resend = false;
1323         bool need_check_tiering = false;
1324         bool need_resend = false;
1325         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1326         enum calc_target_result ct_res;
1327         int ret;
1328
1329         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1330         if (!pi) {
1331                 t->osd = CEPH_HOMELESS_OSD;
1332                 ct_res = CALC_TARGET_POOL_DNE;
1333                 goto out;
1334         }
1335
1336         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1337                 if (t->last_force_resend < pi->last_force_request_resend) {
1338                         t->last_force_resend = pi->last_force_request_resend;
1339                         force_resend = true;
1340                 } else if (t->last_force_resend == 0) {
1341                         force_resend = true;
1342                 }
1343         }
1344         if (ceph_oid_empty(&t->target_oid) || force_resend) {
1345                 ceph_oid_copy(&t->target_oid, &t->base_oid);
1346                 need_check_tiering = true;
1347         }
1348         if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
1349                 ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1350                 need_check_tiering = true;
1351         }
1352
1353         if (need_check_tiering &&
1354             (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1355                 if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1356                         t->target_oloc.pool = pi->read_tier;
1357                 if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1358                         t->target_oloc.pool = pi->write_tier;
1359         }
1360
1361         ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
1362                                         &t->target_oloc, &pgid);
1363         if (ret) {
1364                 WARN_ON(ret != -ENOENT);
1365                 t->osd = CEPH_HOMELESS_OSD;
1366                 ct_res = CALC_TARGET_POOL_DNE;
1367                 goto out;
1368         }
1369         last_pgid.pool = pgid.pool;
1370         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1371
1372         ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
1373         if (any_change &&
1374             ceph_is_new_interval(&t->acting,
1375                                  &acting,
1376                                  &t->up,
1377                                  &up,
1378                                  t->size,
1379                                  pi->size,
1380                                  t->min_size,
1381                                  pi->min_size,
1382                                  t->pg_num,
1383                                  pi->pg_num,
1384                                  t->sort_bitwise,
1385                                  sort_bitwise,
1386                                  &last_pgid))
1387                 force_resend = true;
1388
1389         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1390                 t->paused = false;
1391                 need_resend = true;
1392         }
1393
1394         if (ceph_pg_compare(&t->pgid, &pgid) ||
1395             ceph_osds_changed(&t->acting, &acting, any_change) ||
1396             force_resend) {
1397                 t->pgid = pgid; /* struct */
1398                 ceph_pg_to_primary_shard(osdc->osdmap, &pgid, &t->spgid);
1399                 ceph_osds_copy(&t->acting, &acting);
1400                 ceph_osds_copy(&t->up, &up);
1401                 t->size = pi->size;
1402                 t->min_size = pi->min_size;
1403                 t->pg_num = pi->pg_num;
1404                 t->pg_num_mask = pi->pg_num_mask;
1405                 t->sort_bitwise = sort_bitwise;
1406
1407                 t->osd = acting.primary;
1408                 need_resend = true;
1409         }
1410
1411         ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
1412 out:
1413         dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1414         return ct_res;
1415 }
1416
1417 static void setup_request_data(struct ceph_osd_request *req,
1418                                struct ceph_msg *msg)
1419 {
1420         u32 data_len = 0;
1421         int i;
1422
1423         if (!list_empty(&msg->data))
1424                 return;
1425
1426         WARN_ON(msg->data_length);
1427         for (i = 0; i < req->r_num_ops; i++) {
1428                 struct ceph_osd_req_op *op = &req->r_ops[i];
1429
1430                 switch (op->op) {
1431                 /* request */
1432                 case CEPH_OSD_OP_WRITE:
1433                 case CEPH_OSD_OP_WRITEFULL:
1434                         WARN_ON(op->indata_len != op->extent.length);
1435                         ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1436                         break;
1437                 case CEPH_OSD_OP_SETXATTR:
1438                 case CEPH_OSD_OP_CMPXATTR:
1439                         WARN_ON(op->indata_len != op->xattr.name_len +
1440                                                   op->xattr.value_len);
1441                         ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1442                         break;
1443                 case CEPH_OSD_OP_NOTIFY_ACK:
1444                         ceph_osdc_msg_data_add(msg,
1445                                                &op->notify_ack.request_data);
1446                         break;
1447
1448                 /* reply */
1449                 case CEPH_OSD_OP_STAT:
1450                         ceph_osdc_msg_data_add(req->r_reply,
1451                                                &op->raw_data_in);
1452                         break;
1453                 case CEPH_OSD_OP_READ:
1454                         ceph_osdc_msg_data_add(req->r_reply,
1455                                                &op->extent.osd_data);
1456                         break;
1457                 case CEPH_OSD_OP_LIST_WATCHERS:
1458                         ceph_osdc_msg_data_add(req->r_reply,
1459                                                &op->list_watchers.response_data);
1460                         break;
1461
1462                 /* both */
1463                 case CEPH_OSD_OP_CALL:
1464                         WARN_ON(op->indata_len != op->cls.class_len +
1465                                                   op->cls.method_len +
1466                                                   op->cls.indata_len);
1467                         ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1468                         /* optional, can be NONE */
1469                         ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1470                         /* optional, can be NONE */
1471                         ceph_osdc_msg_data_add(req->r_reply,
1472                                                &op->cls.response_data);
1473                         break;
1474                 case CEPH_OSD_OP_NOTIFY:
1475                         ceph_osdc_msg_data_add(msg,
1476                                                &op->notify.request_data);
1477                         ceph_osdc_msg_data_add(req->r_reply,
1478                                                &op->notify.response_data);
1479                         break;
1480                 }
1481
1482                 data_len += op->indata_len;
1483         }
1484
1485         WARN_ON(data_len != msg->data_length);
1486 }
1487
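/*
 * Encode the front of a MOSDOp v4 message: client_inc, osdmap epoch,
 * flags, mtime, reassert_version, oloc, pgid, oid, the ops array,
 * snap context and retry_attempt, in that order.  Data payloads are
 * not encoded here -- setup_request_data() attaches them to the
 * message and only their total length goes into hdr.data_len.
 */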
1488 static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
1489 {
1490         void *p = msg->front.iov_base;
1491         void *const end = p + msg->front_alloc_len;
1492         u32 data_len = 0;
1493         int i;
1494
1495         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1496                 /* snapshots aren't writeable */
1497                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
1498         } else {
1499                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1500                         req->r_data_offset || req->r_snapc);
1501         }
1502
1503         setup_request_data(req, msg);
1504
1505         ceph_encode_32(&p, 1); /* client_inc, always 1 */
1506         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1507         ceph_encode_32(&p, req->r_flags);
1508         ceph_encode_timespec(p, &req->r_mtime);
1509         p += sizeof(struct ceph_timespec);
1510
1511         /* reassert_version */
1512         memset(p, 0, sizeof(struct ceph_eversion));
1513         p += sizeof(struct ceph_eversion);
1514
1515         /* oloc */
1516         ceph_start_encoding(&p, 5, 4,
1517                             ceph_oloc_encoding_size(&req->r_t.target_oloc));
1518         ceph_encode_64(&p, req->r_t.target_oloc.pool);
1519         ceph_encode_32(&p, -1); /* preferred */
1520         ceph_encode_32(&p, 0); /* key len */
1521         if (req->r_t.target_oloc.pool_ns)
1522                 ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
1523                                    req->r_t.target_oloc.pool_ns->len);
1524         else
1525                 ceph_encode_32(&p, 0);
1526
1527         /* pgid */
1528         ceph_encode_8(&p, 1);
1529         ceph_encode_64(&p, req->r_t.pgid.pool);
1530         ceph_encode_32(&p, req->r_t.pgid.seed);
1531         ceph_encode_32(&p, -1); /* preferred */
1532
1533         /* oid */
1534         ceph_encode_32(&p, req->r_t.target_oid.name_len);
1535         memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1536         p += req->r_t.target_oid.name_len;
1537
1538         /* ops, can imply data */
1539         ceph_encode_16(&p, req->r_num_ops);
1540         for (i = 0; i < req->r_num_ops; i++) {
1541                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
1542                 p += sizeof(struct ceph_osd_op);
1543         }
1544
1545         ceph_encode_64(&p, req->r_snapid); /* snapid */
1546         if (req->r_snapc) {
1547                 ceph_encode_64(&p, req->r_snapc->seq);
1548                 ceph_encode_32(&p, req->r_snapc->num_snaps);
1549                 for (i = 0; i < req->r_snapc->num_snaps; i++)
1550                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
1551         } else {
1552                 ceph_encode_64(&p, 0); /* snap_seq */
1553                 ceph_encode_32(&p, 0); /* snaps len */
1554         }
1555
1556         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1557
1558         BUG_ON(p > end);
1559         msg->front.iov_len = p - msg->front.iov_base;
1560         msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
1561         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1562         msg->hdr.data_len = cpu_to_le32(data_len);
1563         /*
1564          * The header "data_off" is a hint to the receiver allowing it
1565          * to align received data into its buffers such that there's no
1566          * need to re-copy it before writing it to disk (direct I/O).
1567          */
1568         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1569
1570         dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
1571              req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
1572              msg->front.iov_len, data_len);
1573 }
1574
1575 /*
1576  * @req has to be assigned a tid and registered.
1577  */
1578 static void send_request(struct ceph_osd_request *req)
1579 {
1580         struct ceph_osd *osd = req->r_osd;
1581
1582         verify_osd_locked(osd);
1583         WARN_ON(osd->o_osd != req->r_t.osd);
1584
1585         /*
1586          * We may have a previously queued request message hanging
1587          * around.  Cancel it to avoid corrupting the msgr.
1588          */
1589         if (req->r_sent)
1590                 ceph_msg_revoke(req->r_request);
1591
1592         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
1593         if (req->r_attempts)
1594                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1595         else
1596                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
1597
1598         encode_request(req, req->r_request);
1599
1600         dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d flags 0x%x attempt %d\n",
1601              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
1602              req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
1603              req->r_t.spgid.shard, osd->o_osd, req->r_flags, req->r_attempts);
1604
1605         req->r_t.paused = false;
1606         req->r_stamp = jiffies;
1607         req->r_attempts++;
1608
1609         req->r_sent = osd->o_incarnation;
1610         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
1611         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
1612 }
1613
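/*
 * Subscribe to the next osdmap epoch.  The subscription is continuous
 * while a FULL or PAUSE flag is set (we need to see the flag clear),
 * and one-shot otherwise.
 */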
1614 static void maybe_request_map(struct ceph_osd_client *osdc)
1615 {
1616         bool continuous = false;
1617
1618         verify_osdc_locked(osdc);
1619         WARN_ON(!osdc->osdmap->epoch);
1620
1621         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1622             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
1623             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1624                 dout("%s osdc %p continuous\n", __func__, osdc);
1625                 continuous = true;
1626         } else {
1627                 dout("%s osdc %p onetime\n", __func__, osdc);
1628         }
1629
1630         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
1631                                osdc->osdmap->epoch + 1, continuous))
1632                 ceph_monc_renew_subs(&osdc->client->monc);
1633 }
1634
1635 static void complete_request(struct ceph_osd_request *req, int err);
1636 static void send_map_check(struct ceph_osd_request *req);
1637
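/*
 * Compute the target, look up or create the OSD session and either
 * send the request or leave it paused, requesting a newer osdmap as
 * appropriate.  Creating an OSD session requires osdc->lock held for
 * write, so a read-locked caller is promoted to the write lock and
 * retries; the lock is downgraded back to read before returning.
 */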
1638 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
1639 {
1640         struct ceph_osd_client *osdc = req->r_osdc;
1641         struct ceph_osd *osd;
1642         enum calc_target_result ct_res;
1643         bool need_send = false;
1644         bool promoted = false;
1645         bool need_abort = false;
1646
1647         WARN_ON(req->r_tid);
1648         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
1649
1650 again:
1651         ct_res = calc_target(osdc, &req->r_t, false);
1652         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
1653                 goto promote;
1654
1655         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
1656         if (IS_ERR(osd)) {
1657                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
1658                 goto promote;
1659         }
1660
1661         if (osdc->osdmap->epoch < osdc->epoch_barrier) {
1662                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
1663                      osdc->epoch_barrier);
1664                 req->r_t.paused = true;
1665                 maybe_request_map(osdc);
1666         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1667                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1668                 dout("req %p pausewr\n", req);
1669                 req->r_t.paused = true;
1670                 maybe_request_map(osdc);
1671         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
1672                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
1673                 dout("req %p pauserd\n", req);
1674                 req->r_t.paused = true;
1675                 maybe_request_map(osdc);
1676         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1677                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
1678                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
1679                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1680                     pool_full(osdc, req->r_t.base_oloc.pool))) {
1681                 dout("req %p full/pool_full\n", req);
1682                 pr_warn_ratelimited("FULL or reached pool quota\n");
1683                 req->r_t.paused = true;
1684                 maybe_request_map(osdc);
1685                 if (req->r_abort_on_full)
1686                         need_abort = true;
1687         } else if (!osd_homeless(osd)) {
1688                 need_send = true;
1689         } else {
1690                 maybe_request_map(osdc);
1691         }
1692
1693         mutex_lock(&osd->lock);
1694         /*
1695          * Assign the tid atomically with send_request() to protect
1696          * multiple writes to the same object from racing with each
1697          * other, which would result in out-of-order ops on the OSDs.
1698          */
1699         req->r_tid = atomic64_inc_return(&osdc->last_tid);
1700         link_request(osd, req);
1701         if (need_send)
1702                 send_request(req);
1703         else if (need_abort)
1704                 complete_request(req, -ENOSPC);
1705         mutex_unlock(&osd->lock);
1706
1707         if (ct_res == CALC_TARGET_POOL_DNE)
1708                 send_map_check(req);
1709
1710         if (promoted)
1711                 downgrade_write(&osdc->lock);
1712         return;
1713
1714 promote:
1715         up_read(&osdc->lock);
1716         down_write(&osdc->lock);
1717         wrlocked = true;
1718         promoted = true;
1719         goto again;
1720 }
1721
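/*
 * Mark the request ONDISK (the only ack we ever ask for -- see
 * handle_reply()) and start the clock for the osd_request_timeout
 * check in handle_timeout().
 */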
1722 static void account_request(struct ceph_osd_request *req)
1723 {
1724         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
1725         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
1726
1727         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
1728         atomic_inc(&req->r_osdc->num_requests);
1729
1730         req->r_start_stamp = jiffies;
1731 }
1732
1733 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
1734 {
1735         ceph_osdc_get_request(req);
1736         account_request(req);
1737         __submit_request(req, wrlocked);
1738 }
1739
1740 static void finish_request(struct ceph_osd_request *req)
1741 {
1742         struct ceph_osd_client *osdc = req->r_osdc;
1743         struct ceph_osd *osd = req->r_osd;
1744
1745         verify_osd_locked(osd);
1746         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1747
1748         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
1749         unlink_request(osd, req);
1750         atomic_dec(&osdc->num_requests);
1751
1752         /*
1753          * If an OSD has failed or returned and a request has been sent
1754          * twice, it's possible to get a reply and end up here while the
1755          * request message is queued for delivery.  We will ignore the
1756          * reply, so not a big deal, but better to try and catch it.
1757          */
1758         ceph_msg_revoke(req->r_request);
1759         ceph_msg_revoke_incoming(req->r_reply);
1760 }
1761
1762 static void __complete_request(struct ceph_osd_request *req)
1763 {
1764         if (req->r_callback) {
1765                 dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
1766                      req->r_tid, req->r_callback, req->r_result);
1767                 req->r_callback(req);
1768         }
1769 }
1770
1771 /*
1772  * This is open-coded in handle_reply().
1773  */
1774 static void complete_request(struct ceph_osd_request *req, int err)
1775 {
1776         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1777
1778         req->r_result = err;
1779         finish_request(req);
1780         __complete_request(req);
1781         complete_all(&req->r_completion);
1782         ceph_osdc_put_request(req);
1783 }
1784
1785 static void cancel_map_check(struct ceph_osd_request *req)
1786 {
1787         struct ceph_osd_client *osdc = req->r_osdc;
1788         struct ceph_osd_request *lookup_req;
1789
1790         verify_osdc_wrlocked(osdc);
1791
1792         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1793         if (!lookup_req)
1794                 return;
1795
1796         WARN_ON(lookup_req != req);
1797         erase_request_mc(&osdc->map_checks, req);
1798         ceph_osdc_put_request(req);
1799 }
1800
1801 static void cancel_request(struct ceph_osd_request *req)
1802 {
1803         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1804
1805         cancel_map_check(req);
1806         finish_request(req);
1807         complete_all(&req->r_completion);
1808         ceph_osdc_put_request(req);
1809 }
1810
1811 static void abort_request(struct ceph_osd_request *req, int err)
1812 {
1813         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1814
1815         cancel_map_check(req);
1816         complete_request(req, err);
1817 }
1818
1819 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
1820 {
1821         if (likely(eb > osdc->epoch_barrier)) {
1822                 dout("updating epoch_barrier from %u to %u\n",
1823                                 osdc->epoch_barrier, eb);
1824                 osdc->epoch_barrier = eb;
1825                 /* Request a map if we haven't reached the barrier yet */
1826                 if (eb > osdc->osdmap->epoch)
1827                         maybe_request_map(osdc);
1828         }
1829 }
1830
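/*
 * Raise the epoch barrier, taking osdc->lock for write only if the
 * barrier actually needs to move.  update_epoch_barrier() re-checks
 * the condition after the lock upgrade, since another thread may
 * have raised the barrier in the window between up_read() and
 * down_write().
 */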
1831 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
1832 {
1833         down_read(&osdc->lock);
1834         if (unlikely(eb > osdc->epoch_barrier)) {
1835                 up_read(&osdc->lock);
1836                 down_write(&osdc->lock);
1837                 update_epoch_barrier(osdc, eb);
1838                 up_write(&osdc->lock);
1839         } else {
1840                 up_read(&osdc->lock);
1841         }
1842 }
1843 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
1844
1845 /*
1846  * Drop all pending requests that are stalled waiting on a full condition to
1847  * clear, and complete them with -ENOSPC as the return code. Set the
1848  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
1849  * cancelled.
1850  */
1851 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
1852 {
1853         struct rb_node *n;
1854         bool victims = false;
1855
1856         dout("enter abort_on_full\n");
1857
1858         if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
1859                 goto out;
1860
1861         /* Scan the request trees and see if there is anything to abort */
1862         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1863                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
1864                 struct rb_node *m;
1865
1866                 m = rb_first(&osd->o_requests);
1867                 while (m) {
1868                         struct ceph_osd_request *req = rb_entry(m,
1869                                         struct ceph_osd_request, r_node);
1870                         m = rb_next(m);
1871
1872                         if (req->r_abort_on_full) {
1873                                 victims = true;
1874                                 break;
1875                         }
1876                 }
1877                 if (victims)
1878                         break;
1879         }
1880
1881         if (!victims)
1882                 goto out;
1883
1884         /*
1885          * Update the barrier to the current epoch if it's behind that point,
1886          * since we know we have some calls to be aborted in the tree.
1887          */
1888         update_epoch_barrier(osdc, osdc->osdmap->epoch);
1889
1890         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1891                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
1892                 struct rb_node *m;
1893
1894                 m = rb_first(&osd->o_requests);
1895                 while (m) {
1896                         struct ceph_osd_request *req = rb_entry(m,
1897                                         struct ceph_osd_request, r_node);
1898                         m = rb_next(m);
1899
1900                         if (req->r_abort_on_full &&
1901                             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1902                              pool_full(osdc, req->r_t.target_oloc.pool)))
1903                                 abort_request(req, -ENOSPC);
1904                 }
1905         }
1906 out:
1907         dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
1908 }
1909
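/*
 * A pool missing from our osdmap may just mean the map is stale, so
 * ask the monitor for the newest osdmap epoch (send_map_check()).
 * Once we have a map at least that new and the pool is still absent,
 * the pool was really deleted and the request is completed with
 * -ENOENT.
 */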
1910 static void check_pool_dne(struct ceph_osd_request *req)
1911 {
1912         struct ceph_osd_client *osdc = req->r_osdc;
1913         struct ceph_osdmap *map = osdc->osdmap;
1914
1915         verify_osdc_wrlocked(osdc);
1916         WARN_ON(!map->epoch);
1917
1918         if (req->r_attempts) {
1919                 /*
1920                  * We sent a request earlier, which means that
1921                  * previously the pool existed, and now it does not
1922                  * (i.e., it was deleted).
1923                  */
1924                 req->r_map_dne_bound = map->epoch;
1925                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
1926                      req->r_tid);
1927         } else {
1928                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
1929                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
1930         }
1931
1932         if (req->r_map_dne_bound) {
1933                 if (map->epoch >= req->r_map_dne_bound) {
1934                         /* we had a new enough map */
1935                         pr_info_ratelimited("tid %llu pool does not exist\n",
1936                                             req->r_tid);
1937                         complete_request(req, -ENOENT);
1938                 }
1939         } else {
1940                 send_map_check(req);
1941         }
1942 }
1943
1944 static void map_check_cb(struct ceph_mon_generic_request *greq)
1945 {
1946         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
1947         struct ceph_osd_request *req;
1948         u64 tid = greq->private_data;
1949
1950         WARN_ON(greq->result || !greq->u.newest);
1951
1952         down_write(&osdc->lock);
1953         req = lookup_request_mc(&osdc->map_checks, tid);
1954         if (!req) {
1955                 dout("%s tid %llu dne\n", __func__, tid);
1956                 goto out_unlock;
1957         }
1958
1959         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
1960              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
1961         if (!req->r_map_dne_bound)
1962                 req->r_map_dne_bound = greq->u.newest;
1963         erase_request_mc(&osdc->map_checks, req);
1964         check_pool_dne(req);
1965
1966         ceph_osdc_put_request(req);
1967 out_unlock:
1968         up_write(&osdc->lock);
1969 }
1970
1971 static void send_map_check(struct ceph_osd_request *req)
1972 {
1973         struct ceph_osd_client *osdc = req->r_osdc;
1974         struct ceph_osd_request *lookup_req;
1975         int ret;
1976
1977         verify_osdc_wrlocked(osdc);
1978
1979         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1980         if (lookup_req) {
1981                 WARN_ON(lookup_req != req);
1982                 return;
1983         }
1984
1985         ceph_osdc_get_request(req);
1986         insert_request_mc(&osdc->map_checks, req);
1987         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
1988                                           map_check_cb, req->r_tid);
1989         WARN_ON(ret);
1990 }
1991
1992 /*
1993  * lingering requests, watch/notify v2 infrastructure
1994  */
1995 static void linger_release(struct kref *kref)
1996 {
1997         struct ceph_osd_linger_request *lreq =
1998             container_of(kref, struct ceph_osd_linger_request, kref);
1999
2000         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2001              lreq->reg_req, lreq->ping_req);
2002         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2003         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2004         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2005         WARN_ON(!list_empty(&lreq->scan_item));
2006         WARN_ON(!list_empty(&lreq->pending_lworks));
2007         WARN_ON(lreq->osd);
2008
2009         if (lreq->reg_req)
2010                 ceph_osdc_put_request(lreq->reg_req);
2011         if (lreq->ping_req)
2012                 ceph_osdc_put_request(lreq->ping_req);
2013         target_destroy(&lreq->t);
2014         kfree(lreq);
2015 }
2016
2017 static void linger_put(struct ceph_osd_linger_request *lreq)
2018 {
2019         if (lreq)
2020                 kref_put(&lreq->kref, linger_release);
2021 }
2022
2023 static struct ceph_osd_linger_request *
2024 linger_get(struct ceph_osd_linger_request *lreq)
2025 {
2026         kref_get(&lreq->kref);
2027         return lreq;
2028 }
2029
2030 static struct ceph_osd_linger_request *
2031 linger_alloc(struct ceph_osd_client *osdc)
2032 {
2033         struct ceph_osd_linger_request *lreq;
2034
2035         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2036         if (!lreq)
2037                 return NULL;
2038
2039         kref_init(&lreq->kref);
2040         mutex_init(&lreq->lock);
2041         RB_CLEAR_NODE(&lreq->node);
2042         RB_CLEAR_NODE(&lreq->osdc_node);
2043         RB_CLEAR_NODE(&lreq->mc_node);
2044         INIT_LIST_HEAD(&lreq->scan_item);
2045         INIT_LIST_HEAD(&lreq->pending_lworks);
2046         init_completion(&lreq->reg_commit_wait);
2047         init_completion(&lreq->notify_finish_wait);
2048
2049         lreq->osdc = osdc;
2050         target_init(&lreq->t);
2051
2052         dout("%s lreq %p\n", __func__, lreq);
2053         return lreq;
2054 }
2055
2056 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2057 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2058 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2059
2060 /*
2061  * Create linger request <-> OSD session relation.
2062  *
2063  * @lreq has to be registered, @osd may be homeless.
2064  */
2065 static void link_linger(struct ceph_osd *osd,
2066                         struct ceph_osd_linger_request *lreq)
2067 {
2068         verify_osd_locked(osd);
2069         WARN_ON(!lreq->linger_id || lreq->osd);
2070         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2071              osd->o_osd, lreq, lreq->linger_id);
2072
2073         if (!osd_homeless(osd))
2074                 __remove_osd_from_lru(osd);
2075         else
2076                 atomic_inc(&osd->o_osdc->num_homeless);
2077
2078         get_osd(osd);
2079         insert_linger(&osd->o_linger_requests, lreq);
2080         lreq->osd = osd;
2081 }
2082
2083 static void unlink_linger(struct ceph_osd *osd,
2084                           struct ceph_osd_linger_request *lreq)
2085 {
2086         verify_osd_locked(osd);
2087         WARN_ON(lreq->osd != osd);
2088         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2089              osd->o_osd, lreq, lreq->linger_id);
2090
2091         lreq->osd = NULL;
2092         erase_linger(&osd->o_linger_requests, lreq);
2093         put_osd(osd);
2094
2095         if (!osd_homeless(osd))
2096                 maybe_move_osd_to_lru(osd);
2097         else
2098                 atomic_dec(&osd->o_osdc->num_homeless);
2099 }
2100
2101 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2102 {
2103         verify_osdc_locked(lreq->osdc);
2104
2105         return !RB_EMPTY_NODE(&lreq->osdc_node);
2106 }
2107
2108 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2109 {
2110         struct ceph_osd_client *osdc = lreq->osdc;
2111         bool registered;
2112
2113         down_read(&osdc->lock);
2114         registered = __linger_registered(lreq);
2115         up_read(&osdc->lock);
2116
2117         return registered;
2118 }
2119
2120 static void linger_register(struct ceph_osd_linger_request *lreq)
2121 {
2122         struct ceph_osd_client *osdc = lreq->osdc;
2123
2124         verify_osdc_wrlocked(osdc);
2125         WARN_ON(lreq->linger_id);
2126
2127         linger_get(lreq);
2128         lreq->linger_id = ++osdc->last_linger_id;
2129         insert_linger_osdc(&osdc->linger_requests, lreq);
2130 }
2131
2132 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2133 {
2134         struct ceph_osd_client *osdc = lreq->osdc;
2135
2136         verify_osdc_wrlocked(osdc);
2137
2138         erase_linger_osdc(&osdc->linger_requests, lreq);
2139         linger_put(lreq);
2140 }
2141
2142 static void cancel_linger_request(struct ceph_osd_request *req)
2143 {
2144         struct ceph_osd_linger_request *lreq = req->r_priv;
2145
2146         WARN_ON(!req->r_linger);
2147         cancel_request(req);
2148         linger_put(lreq);
2149 }
2150
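/*
 * Watch/notify events and watch errors are not delivered to the
 * user's callbacks directly from the message handler; each event is
 * packaged as a linger_work item and run from the notify_wq
 * workqueue via lwork_queue().
 */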
2151 struct linger_work {
2152         struct work_struct work;
2153         struct ceph_osd_linger_request *lreq;
2154         struct list_head pending_item;
2155         unsigned long queued_stamp;
2156
2157         union {
2158                 struct {
2159                         u64 notify_id;
2160                         u64 notifier_id;
2161                         void *payload; /* points into @msg front */
2162                         size_t payload_len;
2163
2164                         struct ceph_msg *msg; /* for ceph_msg_put() */
2165                 } notify;
2166                 struct {
2167                         int err;
2168                 } error;
2169         };
2170 };
2171
2172 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2173                                        work_func_t workfn)
2174 {
2175         struct linger_work *lwork;
2176
2177         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2178         if (!lwork)
2179                 return NULL;
2180
2181         INIT_WORK(&lwork->work, workfn);
2182         INIT_LIST_HEAD(&lwork->pending_item);
2183         lwork->lreq = linger_get(lreq);
2184
2185         return lwork;
2186 }
2187
2188 static void lwork_free(struct linger_work *lwork)
2189 {
2190         struct ceph_osd_linger_request *lreq = lwork->lreq;
2191
2192         mutex_lock(&lreq->lock);
2193         list_del(&lwork->pending_item);
2194         mutex_unlock(&lreq->lock);
2195
2196         linger_put(lreq);
2197         kfree(lwork);
2198 }
2199
2200 static void lwork_queue(struct linger_work *lwork)
2201 {
2202         struct ceph_osd_linger_request *lreq = lwork->lreq;
2203         struct ceph_osd_client *osdc = lreq->osdc;
2204
2205         verify_lreq_locked(lreq);
2206         WARN_ON(!list_empty(&lwork->pending_item));
2207
2208         lwork->queued_stamp = jiffies;
2209         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2210         queue_work(osdc->notify_wq, &lwork->work);
2211 }
2212
2213 static void do_watch_notify(struct work_struct *w)
2214 {
2215         struct linger_work *lwork = container_of(w, struct linger_work, work);
2216         struct ceph_osd_linger_request *lreq = lwork->lreq;
2217
2218         if (!linger_registered(lreq)) {
2219                 dout("%s lreq %p not registered\n", __func__, lreq);
2220                 goto out;
2221         }
2222
2223         WARN_ON(!lreq->is_watch);
2224         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2225              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2226              lwork->notify.payload_len);
2227         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2228                   lwork->notify.notifier_id, lwork->notify.payload,
2229                   lwork->notify.payload_len);
2230
2231 out:
2232         ceph_msg_put(lwork->notify.msg);
2233         lwork_free(lwork);
2234 }
2235
2236 static void do_watch_error(struct work_struct *w)
2237 {
2238         struct linger_work *lwork = container_of(w, struct linger_work, work);
2239         struct ceph_osd_linger_request *lreq = lwork->lreq;
2240
2241         if (!linger_registered(lreq)) {
2242                 dout("%s lreq %p not registered\n", __func__, lreq);
2243                 goto out;
2244         }
2245
2246         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2247         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2248
2249 out:
2250         lwork_free(lwork);
2251 }
2252
2253 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2254 {
2255         struct linger_work *lwork;
2256
2257         lwork = lwork_alloc(lreq, do_watch_error);
2258         if (!lwork) {
2259                 pr_err("failed to allocate error-lwork\n");
2260                 return;
2261         }
2262
2263         lwork->error.err = lreq->last_error;
2264         lwork_queue(lwork);
2265 }
2266
2267 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2268                                        int result)
2269 {
2270         if (!completion_done(&lreq->reg_commit_wait)) {
2271                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2272                 complete_all(&lreq->reg_commit_wait);
2273         }
2274 }
2275
2276 static void linger_commit_cb(struct ceph_osd_request *req)
2277 {
2278         struct ceph_osd_linger_request *lreq = req->r_priv;
2279
2280         mutex_lock(&lreq->lock);
2281         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2282              lreq->linger_id, req->r_result);
2283         linger_reg_commit_complete(lreq, req->r_result);
2284         lreq->committed = true;
2285
2286         if (!lreq->is_watch) {
2287                 struct ceph_osd_data *osd_data =
2288                     osd_req_op_data(req, 0, notify, response_data);
2289                 void *p = page_address(osd_data->pages[0]);
2290
2291                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2292                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2293
2294                 /* make note of the notify_id */
2295                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2296                         lreq->notify_id = ceph_decode_64(&p);
2297                         dout("lreq %p notify_id %llu\n", lreq,
2298                              lreq->notify_id);
2299                 } else {
2300                         dout("lreq %p no notify_id\n", lreq);
2301                 }
2302         }
2303
2304         mutex_unlock(&lreq->lock);
2305         linger_put(lreq);
2306 }
2307
2308 static int normalize_watch_error(int err)
2309 {
2310         /*
2311          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2312          * notification and a failure to reconnect because we raced with
2313          * the delete appear the same to the user.
2314          */
2315         if (err == -ENOENT)
2316                 err = -ENOTCONN;
2317
2318         return err;
2319 }
2320
2321 static void linger_reconnect_cb(struct ceph_osd_request *req)
2322 {
2323         struct ceph_osd_linger_request *lreq = req->r_priv;
2324
2325         mutex_lock(&lreq->lock);
2326         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2327              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2328         if (req->r_result < 0) {
2329                 if (!lreq->last_error) {
2330                         lreq->last_error = normalize_watch_error(req->r_result);
2331                         queue_watch_error(lreq);
2332                 }
2333         }
2334
2335         mutex_unlock(&lreq->lock);
2336         linger_put(lreq);
2337 }
2338
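/*
 * (Re)send the lingering request.  For a watch that has already been
 * committed this becomes a RECONNECT with a bumped register_gen, so
 * that stale pongs can be told apart from current ones in
 * linger_ping_cb(); otherwise it is the initial registration (WATCH
 * or NOTIFY).
 */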
2339 static void send_linger(struct ceph_osd_linger_request *lreq)
2340 {
2341         struct ceph_osd_request *req = lreq->reg_req;
2342         struct ceph_osd_req_op *op = &req->r_ops[0];
2343
2344         verify_osdc_wrlocked(req->r_osdc);
2345         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2346
2347         if (req->r_osd)
2348                 cancel_linger_request(req);
2349
2350         request_reinit(req);
2351         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2352         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2353         req->r_flags = lreq->t.flags;
2354         req->r_mtime = lreq->mtime;
2355
2356         mutex_lock(&lreq->lock);
2357         if (lreq->is_watch && lreq->committed) {
2358                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2359                         op->watch.cookie != lreq->linger_id);
2360                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2361                 op->watch.gen = ++lreq->register_gen;
2362                 dout("lreq %p reconnect register_gen %u\n", lreq,
2363                      op->watch.gen);
2364                 req->r_callback = linger_reconnect_cb;
2365         } else {
2366                 if (!lreq->is_watch)
2367                         lreq->notify_id = 0;
2368                 else
2369                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2370                 dout("lreq %p register\n", lreq);
2371                 req->r_callback = linger_commit_cb;
2372         }
2373         mutex_unlock(&lreq->lock);
2374
2375         req->r_priv = linger_get(lreq);
2376         req->r_linger = true;
2377
2378         submit_request(req, true);
2379 }
2380
2381 static void linger_ping_cb(struct ceph_osd_request *req)
2382 {
2383         struct ceph_osd_linger_request *lreq = req->r_priv;
2384
2385         mutex_lock(&lreq->lock);
2386         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2387              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2388              lreq->last_error);
2389         if (lreq->register_gen == req->r_ops[0].watch.gen) {
2390                 if (!req->r_result) {
2391                         lreq->watch_valid_thru = lreq->ping_sent;
2392                 } else if (!lreq->last_error) {
2393                         lreq->last_error = normalize_watch_error(req->r_result);
2394                         queue_watch_error(lreq);
2395                 }
2396         } else {
2397                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2398                      lreq->register_gen, req->r_ops[0].watch.gen);
2399         }
2400
2401         mutex_unlock(&lreq->lock);
2402         linger_put(lreq);
2403 }
2404
2405 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2406 {
2407         struct ceph_osd_client *osdc = lreq->osdc;
2408         struct ceph_osd_request *req = lreq->ping_req;
2409         struct ceph_osd_req_op *op = &req->r_ops[0];
2410
2411         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2412                 dout("%s PAUSERD\n", __func__);
2413                 return;
2414         }
2415
2416         lreq->ping_sent = jiffies;
2417         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2418              __func__, lreq, lreq->linger_id, lreq->ping_sent,
2419              lreq->register_gen);
2420
2421         if (req->r_osd)
2422                 cancel_linger_request(req);
2423
2424         request_reinit(req);
2425         target_copy(&req->r_t, &lreq->t);
2426
2427         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2428                 op->watch.cookie != lreq->linger_id ||
2429                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2430         op->watch.gen = lreq->register_gen;
2431         req->r_callback = linger_ping_cb;
2432         req->r_priv = linger_get(lreq);
2433         req->r_linger = true;
2434
2435         ceph_osdc_get_request(req);
2436         account_request(req);
2437         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2438         link_request(lreq->osd, req);
2439         send_request(req);
2440 }
2441
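/*
 * Pick the initial target for a lingering request and send the
 * registration.  Called with osdc->lock held for write.
 */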
2442 static void linger_submit(struct ceph_osd_linger_request *lreq)
2443 {
2444         struct ceph_osd_client *osdc = lreq->osdc;
2445         struct ceph_osd *osd;
2446
2447         calc_target(osdc, &lreq->t, false);
2448         osd = lookup_create_osd(osdc, lreq->t.osd, true);
2449         link_linger(osd, lreq);
2450
2451         send_linger(lreq);
2452 }
2453
2454 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2455 {
2456         struct ceph_osd_client *osdc = lreq->osdc;
2457         struct ceph_osd_linger_request *lookup_lreq;
2458
2459         verify_osdc_wrlocked(osdc);
2460
2461         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2462                                        lreq->linger_id);
2463         if (!lookup_lreq)
2464                 return;
2465
2466         WARN_ON(lookup_lreq != lreq);
2467         erase_linger_mc(&osdc->linger_map_checks, lreq);
2468         linger_put(lreq);
2469 }
2470
2471 /*
2472  * @lreq has to be both registered and linked.
2473  */
2474 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2475 {
2476         if (lreq->is_watch && lreq->ping_req->r_osd)
2477                 cancel_linger_request(lreq->ping_req);
2478         if (lreq->reg_req->r_osd)
2479                 cancel_linger_request(lreq->reg_req);
2480         cancel_linger_map_check(lreq);
2481         unlink_linger(lreq->osd, lreq);
2482         linger_unregister(lreq);
2483 }
2484
2485 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2486 {
2487         struct ceph_osd_client *osdc = lreq->osdc;
2488
2489         down_write(&osdc->lock);
2490         if (__linger_registered(lreq))
2491                 __linger_cancel(lreq);
2492         up_write(&osdc->lock);
2493 }
2494
2495 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2496
2497 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2498 {
2499         struct ceph_osd_client *osdc = lreq->osdc;
2500         struct ceph_osdmap *map = osdc->osdmap;
2501
2502         verify_osdc_wrlocked(osdc);
2503         WARN_ON(!map->epoch);
2504
2505         if (lreq->register_gen) {
2506                 lreq->map_dne_bound = map->epoch;
2507                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2508                      lreq, lreq->linger_id);
2509         } else {
2510                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2511                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2512                      map->epoch);
2513         }
2514
2515         if (lreq->map_dne_bound) {
2516                 if (map->epoch >= lreq->map_dne_bound) {
2517                         /* we had a new enough map */
2518                         pr_info("linger_id %llu pool does not exist\n",
2519                                 lreq->linger_id);
2520                         linger_reg_commit_complete(lreq, -ENOENT);
2521                         __linger_cancel(lreq);
2522                 }
2523         } else {
2524                 send_linger_map_check(lreq);
2525         }
2526 }
2527
2528 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2529 {
2530         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2531         struct ceph_osd_linger_request *lreq;
2532         u64 linger_id = greq->private_data;
2533
2534         WARN_ON(greq->result || !greq->u.newest);
2535
2536         down_write(&osdc->lock);
2537         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2538         if (!lreq) {
2539                 dout("%s linger_id %llu dne\n", __func__, linger_id);
2540                 goto out_unlock;
2541         }
2542
2543         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
2544              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2545              greq->u.newest);
2546         if (!lreq->map_dne_bound)
2547                 lreq->map_dne_bound = greq->u.newest;
2548         erase_linger_mc(&osdc->linger_map_checks, lreq);
2549         check_linger_pool_dne(lreq);
2550
2551         linger_put(lreq);
2552 out_unlock:
2553         up_write(&osdc->lock);
2554 }
2555
2556 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
2557 {
2558         struct ceph_osd_client *osdc = lreq->osdc;
2559         struct ceph_osd_linger_request *lookup_lreq;
2560         int ret;
2561
2562         verify_osdc_wrlocked(osdc);
2563
2564         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2565                                        lreq->linger_id);
2566         if (lookup_lreq) {
2567                 WARN_ON(lookup_lreq != lreq);
2568                 return;
2569         }
2570
2571         linger_get(lreq);
2572         insert_linger_mc(&osdc->linger_map_checks, lreq);
2573         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2574                                           linger_map_check_cb, lreq->linger_id);
2575         WARN_ON(ret);
2576 }
2577
2578 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
2579 {
2580         int ret;
2581
2582         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2583         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
2584         return ret ?: lreq->reg_commit_error;
2585 }
2586
2587 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
2588 {
2589         int ret;
2590
2591         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2592         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
2593         return ret ?: lreq->notify_finish_error;
2594 }
2595
2596 /*
2597  * Timeout callback, called every N seconds.  When one or more OSD
2598  * requests have been active for more than N seconds, we send a keepalive
2599  * (tag + timestamp) to the OSD to ensure any communications channel
2600  * reset is detected.
2601  */
2602 static void handle_timeout(struct work_struct *work)
2603 {
2604         struct ceph_osd_client *osdc =
2605                 container_of(work, struct ceph_osd_client, timeout_work.work);
2606         struct ceph_options *opts = osdc->client->options;
2607         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2608         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
2609         LIST_HEAD(slow_osds);
2610         struct rb_node *n, *p;
2611
2612         dout("%s osdc %p\n", __func__, osdc);
2613         down_write(&osdc->lock);
2614
2615         /*
2616          * ping osds that are a bit slow.  this ensures that if there
2617          * is a break in the TCP connection we will notice, and reopen
2618          * a connection with that osd (from the fault callback).
2619          */
2620         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2621                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2622                 bool found = false;
2623
2624                 for (p = rb_first(&osd->o_requests); p; ) {
2625                         struct ceph_osd_request *req =
2626                             rb_entry(p, struct ceph_osd_request, r_node);
2627
2628                         p = rb_next(p); /* abort_request() */
2629
2630                         if (time_before(req->r_stamp, cutoff)) {
2631                                 dout(" req %p tid %llu on osd%d is laggy\n",
2632                                      req, req->r_tid, osd->o_osd);
2633                                 found = true;
2634                         }
2635                         if (opts->osd_request_timeout &&
2636                             time_before(req->r_start_stamp, expiry_cutoff)) {
2637                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2638                                        req->r_tid, osd->o_osd);
2639                                 abort_request(req, -ETIMEDOUT);
2640                         }
2641                 }
2642                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2643                         struct ceph_osd_linger_request *lreq =
2644                             rb_entry(p, struct ceph_osd_linger_request, node);
2645
2646                         dout(" lreq %p linger_id %llu is served by osd%d\n",
2647                              lreq, lreq->linger_id, osd->o_osd);
2648                         found = true;
2649
2650                         mutex_lock(&lreq->lock);
2651                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
2652                                 send_linger_ping(lreq);
2653                         mutex_unlock(&lreq->lock);
2654                 }
2655
2656                 if (found)
2657                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
2658         }
2659
2660         if (opts->osd_request_timeout) {
2661                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
2662                         struct ceph_osd_request *req =
2663                             rb_entry(p, struct ceph_osd_request, r_node);
2664
2665                         p = rb_next(p); /* abort_request() */
2666
2667                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
2668                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2669                                        req->r_tid, osdc->homeless_osd.o_osd);
2670                                 abort_request(req, -ETIMEDOUT);
2671                         }
2672                 }
2673         }
2674
2675         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2676                 maybe_request_map(osdc);
2677
2678         while (!list_empty(&slow_osds)) {
2679                 struct ceph_osd *osd = list_first_entry(&slow_osds,
2680                                                         struct ceph_osd,
2681                                                         o_keepalive_item);
2682                 list_del_init(&osd->o_keepalive_item);
2683                 ceph_con_keepalive(&osd->o_con);
2684         }
2685
2686         up_write(&osdc->lock);
2687         schedule_delayed_work(&osdc->timeout_work,
2688                               osdc->client->options->osd_keepalive_timeout);
2689 }
2690
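/*
 * Close OSD sessions that have been idle -- no regular and no linger
 * requests -- for longer than osd_idle_ttl, checking every ttl/4.
 */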
2691 static void handle_osds_timeout(struct work_struct *work)
2692 {
2693         struct ceph_osd_client *osdc =
2694                 container_of(work, struct ceph_osd_client,
2695                              osds_timeout_work.work);
2696         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2697         struct ceph_osd *osd, *nosd;
2698
2699         dout("%s osdc %p\n", __func__, osdc);
2700         down_write(&osdc->lock);
2701         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2702                 if (time_before(jiffies, osd->lru_ttl))
2703                         break;
2704
2705                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2706                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2707                 close_osd(osd);
2708         }
2709
2710         up_write(&osdc->lock);
2711         schedule_delayed_work(&osdc->osds_timeout_work,
2712                               round_jiffies_relative(delay));
2713 }
2714
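/*
 * Decode a ceph_object_locator.  Only the plain pool id is supported:
 * a locator key, a namespace change or an explicit hash would make
 * the mapping uncomputable for us, so each of those is rejected with
 * -EINVAL.
 */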
2715 static int ceph_oloc_decode(void **p, void *end,
2716                             struct ceph_object_locator *oloc)
2717 {
2718         u8 struct_v, struct_cv;
2719         u32 len;
2720         void *struct_end;
2721         int ret = 0;
2722
2723         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2724         struct_v = ceph_decode_8(p);
2725         struct_cv = ceph_decode_8(p);
2726         if (struct_v < 3) {
2727                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
2728                         struct_v, struct_cv);
2729                 goto e_inval;
2730         }
2731         if (struct_cv > 6) {
2732                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2733                         struct_v, struct_cv);
2734                 goto e_inval;
2735         }
2736         len = ceph_decode_32(p);
2737         ceph_decode_need(p, end, len, e_inval);
2738         struct_end = *p + len;
2739
2740         oloc->pool = ceph_decode_64(p);
2741         *p += 4; /* skip preferred */
2742
2743         len = ceph_decode_32(p);
2744         if (len > 0) {
2745                 pr_warn("ceph_object_locator::key is set\n");
2746                 goto e_inval;
2747         }
2748
2749         if (struct_v >= 5) {
2750                 bool changed = false;
2751
2752                 len = ceph_decode_32(p);
2753                 if (len > 0) {
2754                         ceph_decode_need(p, end, len, e_inval);
2755                         if (!oloc->pool_ns ||
2756                             ceph_compare_string(oloc->pool_ns, *p, len))
2757                                 changed = true;
2758                         *p += len;
2759                 } else {
2760                         if (oloc->pool_ns)
2761                                 changed = true;
2762                 }
2763                 if (changed) {
2764                         /* redirect changes namespace */
2765                         pr_warn("ceph_object_locator::nspace is changed\n");
2766                         goto e_inval;
2767                 }
2768         }
2769
2770         if (struct_v >= 6) {
2771                 s64 hash = ceph_decode_64(p);
2772                 if (hash != -1) {
2773                         pr_warn("ceph_object_locator::hash is set\n");
2774                         goto e_inval;
2775                 }
2776         }
2777
2778         /* skip the rest */
2779         *p = struct_end;
2780 out:
2781         return ret;
2782
2783 e_inval:
2784         ret = -EINVAL;
2785         goto out;
2786 }
2787
2788 static int ceph_redirect_decode(void **p, void *end,
2789                                 struct ceph_request_redirect *redir)
2790 {
2791         u8 struct_v, struct_cv;
2792         u32 len;
2793         void *struct_end;
2794         int ret;
2795
2796         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2797         struct_v = ceph_decode_8(p);
2798         struct_cv = ceph_decode_8(p);
2799         if (struct_cv > 1) {
2800                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2801                         struct_v, struct_cv);
2802                 goto e_inval;
2803         }
2804         len = ceph_decode_32(p);
2805         ceph_decode_need(p, end, len, e_inval);
2806         struct_end = *p + len;
2807
2808         ret = ceph_oloc_decode(p, end, &redir->oloc);
2809         if (ret)
2810                 goto out;
2811
2812         len = ceph_decode_32(p);
2813         if (len > 0) {
2814                 pr_warn("ceph_request_redirect::object_name is set\n");
2815                 goto e_inval;
2816         }
2817
2818         len = ceph_decode_32(p);
2819         *p += len; /* skip osd_instructions */
2820
2821         /* skip the rest */
2822         *p = struct_end;
2823 out:
2824         return ret;
2825
2826 e_inval:
2827         ret = -EINVAL;
2828         goto out;
2829 }
2830
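/*
 * Decoded form of an MOSDOpReply message.  The per-op return values
 * and output lengths are bounded by CEPH_OSD_MAX_OPS, the same limit
 * that is enforced on the request side.
 */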
2831 struct MOSDOpReply {
2832         struct ceph_pg pgid;
2833         u64 flags;
2834         int result;
2835         u32 epoch;
2836         int num_ops;
2837         u32 outdata_len[CEPH_OSD_MAX_OPS];
2838         s32 rval[CEPH_OSD_MAX_OPS];
2839         int retry_attempt;
2840         struct ceph_eversion replay_version;
2841         u64 user_version;
2842         struct ceph_request_redirect redirect;
2843 };
2844
2845 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2846 {
2847         void *p = msg->front.iov_base;
2848         void *const end = p + msg->front.iov_len;
2849         u16 version = le16_to_cpu(msg->hdr.version);
2850         struct ceph_eversion bad_replay_version;
2851         u8 decode_redir;
2852         u32 len;
2853         int ret;
2854         int i;
2855
2856         ceph_decode_32_safe(&p, end, len, e_inval);
2857         ceph_decode_need(&p, end, len, e_inval);
2858         p += len; /* skip oid */
2859
2860         ret = ceph_decode_pgid(&p, end, &m->pgid);
2861         if (ret)
2862                 return ret;
2863
2864         ceph_decode_64_safe(&p, end, m->flags, e_inval);
2865         ceph_decode_32_safe(&p, end, m->result, e_inval);
2866         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2867         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2868         p += sizeof(bad_replay_version);
2869         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2870
2871         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2872         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2873                 goto e_inval;
2874
2875         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2876                          e_inval);
2877         for (i = 0; i < m->num_ops; i++) {
2878                 struct ceph_osd_op *op = p;
2879
2880                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2881                 p += sizeof(*op);
2882         }
2883
2884         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2885         for (i = 0; i < m->num_ops; i++)
2886                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2887
2888         if (version >= 5) {
2889                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2890                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2891                 p += sizeof(m->replay_version);
2892                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2893         } else {
2894                 m->replay_version = bad_replay_version; /* struct */
2895                 m->user_version = le64_to_cpu(m->replay_version.version);
2896         }
2897
2898         if (version >= 6) {
2899                 if (version >= 7)
2900                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2901                 else
2902                         decode_redir = 1;
2903         } else {
2904                 decode_redir = 0;
2905         }
2906
2907         if (decode_redir) {
2908                 ret = ceph_redirect_decode(&p, end, &m->redirect);
2909                 if (ret)
2910                         return ret;
2911         } else {
2912                 ceph_oloc_init(&m->redirect.oloc);
2913         }
2914
2915         return 0;
2916
2917 e_inval:
2918         return -EINVAL;
2919 }
2920
2921 /*
2922  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
2923  * specified.
2924  */
2925 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2926 {
2927         struct ceph_osd_client *osdc = osd->o_osdc;
2928         struct ceph_osd_request *req;
2929         struct MOSDOpReply m;
2930         u64 tid = le64_to_cpu(msg->hdr.tid);
2931         u32 data_len = 0;
2932         int ret;
2933         int i;
2934
2935         dout("%s msg %p tid %llu\n", __func__, msg, tid);
2936
2937         down_read(&osdc->lock);
2938         if (!osd_registered(osd)) {
2939                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2940                 goto out_unlock_osdc;
2941         }
2942         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2943
2944         mutex_lock(&osd->lock);
2945         req = lookup_request(&osd->o_requests, tid);
2946         if (!req) {
2947                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2948                 goto out_unlock_session;
2949         }
2950
2951         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
2952         ret = decode_MOSDOpReply(msg, &m);
2953         m.redirect.oloc.pool_ns = NULL;
2954         if (ret) {
2955                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2956                        req->r_tid, ret);
2957                 ceph_msg_dump(msg);
2958                 goto fail_request;
2959         }
2960         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2961              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2962              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2963              le64_to_cpu(m.replay_version.version), m.user_version);
2964
2965         if (m.retry_attempt >= 0) {
2966                 if (m.retry_attempt != req->r_attempts - 1) {
2967                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2968                              req, req->r_tid, m.retry_attempt,
2969                              req->r_attempts - 1);
2970                         goto out_unlock_session;
2971                 }
2972         } else {
2973                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2974         }
2975
2976         if (!ceph_oloc_empty(&m.redirect.oloc)) {
2977                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2978                      m.redirect.oloc.pool);
2979                 unlink_request(osd, req);
2980                 mutex_unlock(&osd->lock);
2981
2982                 /*
2983                  * Not ceph_oloc_copy() - changing pool_ns is not
2984                  * supported.
2985                  */
2986                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
2987                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
2988                 req->r_tid = 0;
2989                 __submit_request(req, false);
2990                 goto out_unlock_osdc;
2991         }
2992
2993         if (m.num_ops != req->r_num_ops) {
2994                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2995                        req->r_num_ops, req->r_tid);
2996                 goto fail_request;
2997         }
2998         for (i = 0; i < req->r_num_ops; i++) {
2999                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3000                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
3001                 req->r_ops[i].rval = m.rval[i];
3002                 req->r_ops[i].outdata_len = m.outdata_len[i];
3003                 data_len += m.outdata_len[i];
3004         }
3005         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3006                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3007                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3008                 goto fail_request;
3009         }
3010         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3011              req, req->r_tid, m.result, data_len);
3012
3013         /*
3014          * Since we only ever request ONDISK, we should only ever get
3015          * one (type of) reply back.
3016          */
3017         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3018         req->r_result = m.result ?: data_len;
3019         finish_request(req);
3020         mutex_unlock(&osd->lock);
3021         up_read(&osdc->lock);
3022
3023         __complete_request(req);
3024         complete_all(&req->r_completion);
3025         ceph_osdc_put_request(req);
3026         return;
3027
3028 fail_request:
3029         complete_request(req, -EIO);
3030 out_unlock_session:
3031         mutex_unlock(&osd->lock);
3032 out_unlock_osdc:
3033         up_read(&osdc->lock);
3034 }
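
/*
 * Note (derived from the code above): the reply path takes osdc->lock
 * for read and then osd->lock, and both are dropped before
 * __complete_request() runs, so a completion callback is free to
 * submit new requests.
 */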
3035
3036 static void set_pool_was_full(struct ceph_osd_client *osdc)
3037 {
3038         struct rb_node *n;
3039
3040         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3041                 struct ceph_pg_pool_info *pi =
3042                     rb_entry(n, struct ceph_pg_pool_info, node);
3043
3044                 pi->was_full = __pool_full(pi);
3045         }
3046 }
3047
3048 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3049 {
3050         struct ceph_pg_pool_info *pi;
3051
3052         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3053         if (!pi)
3054                 return false;
3055
3056         return pi->was_full && !__pool_full(pi);
3057 }
3058
3059 static enum calc_target_result
3060 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3061 {
3062         struct ceph_osd_client *osdc = lreq->osdc;
3063         enum calc_target_result ct_res;
3064
3065         ct_res = calc_target(osdc, &lreq->t, true);
3066         if (ct_res == CALC_TARGET_NEED_RESEND) {
3067                 struct ceph_osd *osd;
3068
3069                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3070                 if (osd != lreq->osd) {
3071                         unlink_linger(lreq->osd, lreq);
3072                         link_linger(osd, lreq);
3073                 }
3074         }
3075
3076         return ct_res;
3077 }
3078
3079 /*
3080  * Requeue requests whose mapping to an OSD has changed.
3081  */
3082 static void scan_requests(struct ceph_osd *osd,
3083                           bool force_resend,
3084                           bool cleared_full,
3085                           bool check_pool_cleared_full,
3086                           struct rb_root *need_resend,
3087                           struct list_head *need_resend_linger)
3088 {
3089         struct ceph_osd_client *osdc = osd->o_osdc;
3090         struct rb_node *n;
3091         bool force_resend_writes;
3092
3093         for (n = rb_first(&osd->o_linger_requests); n; ) {
3094                 struct ceph_osd_linger_request *lreq =
3095                     rb_entry(n, struct ceph_osd_linger_request, node);
3096                 enum calc_target_result ct_res;
3097
3098                 n = rb_next(n); /* recalc_linger_target() */
3099
3100                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3101                      lreq->linger_id);
3102                 ct_res = recalc_linger_target(lreq);
3103                 switch (ct_res) {
3104                 case CALC_TARGET_NO_ACTION:
3105                         force_resend_writes = cleared_full ||
3106                             (check_pool_cleared_full &&
3107                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3108                         if (!force_resend && !force_resend_writes)
3109                                 break;
3110
3111                         /* fall through */
3112                 case CALC_TARGET_NEED_RESEND:
3113                         cancel_linger_map_check(lreq);
3114                         /*
3115                          * scan_requests() for the previous epoch(s)
3116                          * may have already added it to the list, since
3117                          * it's not unlinked here.
3118                          */
3119                         if (list_empty(&lreq->scan_item))
3120                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3121                         break;
3122                 case CALC_TARGET_POOL_DNE:
3123                         check_linger_pool_dne(lreq);
3124                         break;
3125                 }
3126         }
3127
3128         for (n = rb_first(&osd->o_requests); n; ) {
3129                 struct ceph_osd_request *req =
3130                     rb_entry(n, struct ceph_osd_request, r_node);
3131                 enum calc_target_result ct_res;
3132
3133                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3134
3135                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3136                 ct_res = calc_target(osdc, &req->r_t, false);
3137                 switch (ct_res) {
3138                 case CALC_TARGET_NO_ACTION:
3139                         force_resend_writes = cleared_full ||
3140                             (check_pool_cleared_full &&
3141                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3142                         if (!force_resend &&
3143                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3144                              !force_resend_writes))
3145                                 break;
3146
3147                         /* fall through */
3148                 case CALC_TARGET_NEED_RESEND:
3149                         cancel_map_check(req);
3150                         unlink_request(osd, req);
3151                         insert_request(need_resend, req);
3152                         break;
3153                 case CALC_TARGET_POOL_DNE:
3154                         check_pool_dne(req);
3155                         break;
3156                 }
3157         }
3158 }
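
/*
 * To summarize the two loops above: a (linger) request ends up on the
 * resend list either because its target actually moved
 * (CALC_TARGET_NEED_RESEND), or because its target is unchanged but a
 * resend is forced - @force_resend is set or the cluster/pool just
 * cleared its full flag.  The latter forces only writes for regular
 * requests, but all linger requests.
 */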
3159
3160 static int handle_one_map(struct ceph_osd_client *osdc,
3161                           void *p, void *end, bool incremental,
3162                           struct rb_root *need_resend,
3163                           struct list_head *need_resend_linger)
3164 {
3165         struct ceph_osdmap *newmap;
3166         struct rb_node *n;
3167         bool skipped_map = false;
3168         bool was_full;
3169
3170         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3171         set_pool_was_full(osdc);
3172
3173         if (incremental)
3174                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3175         else
3176                 newmap = ceph_osdmap_decode(&p, end);
3177         if (IS_ERR(newmap))
3178                 return PTR_ERR(newmap);
3179
3180         if (newmap != osdc->osdmap) {
3181                 /*
3182                  * Preserve ->was_full before destroying the old map.
3183                  * For pools that weren't in the old map, ->was_full
3184                  * should be false.
3185                  */
3186                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3187                         struct ceph_pg_pool_info *pi =
3188                             rb_entry(n, struct ceph_pg_pool_info, node);
3189                         struct ceph_pg_pool_info *old_pi;
3190
3191                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3192                         if (old_pi)
3193                                 pi->was_full = old_pi->was_full;
3194                         else
3195                                 WARN_ON(pi->was_full);
3196                 }
3197
3198                 if (osdc->osdmap->epoch &&
3199                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3200                         WARN_ON(incremental);
3201                         skipped_map = true;
3202                 }
3203
3204                 ceph_osdmap_destroy(osdc->osdmap);
3205                 osdc->osdmap = newmap;
3206         }
3207
3208         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3209         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3210                       need_resend, need_resend_linger);
3211
3212         for (n = rb_first(&osdc->osds); n; ) {
3213                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3214
3215                 n = rb_next(n); /* close_osd() */
3216
3217                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3218                               need_resend_linger);
3219                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3220                     memcmp(&osd->o_con.peer_addr,
3221                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3222                            sizeof(struct ceph_entity_addr)))
3223                         close_osd(osd);
3224         }
3225
3226         return 0;
3227 }
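
/*
 * Note on @skipped_map (derived from the checks above): it is set only
 * when a full map moves the epoch forward by more than one, i.e. some
 * intermediate maps were never seen.  scan_requests() is then told to
 * force a resend of everything, since any of the missed maps could
 * have moved a target.
 */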
3228
3229 static void kick_requests(struct ceph_osd_client *osdc,
3230                           struct rb_root *need_resend,
3231                           struct list_head *need_resend_linger)
3232 {
3233         struct ceph_osd_linger_request *lreq, *nlreq;
3234         struct rb_node *n;
3235
3236         for (n = rb_first(need_resend); n; ) {
3237                 struct ceph_osd_request *req =
3238                     rb_entry(n, struct ceph_osd_request, r_node);
3239                 struct ceph_osd *osd;
3240
3241                 n = rb_next(n);
3242                 erase_request(need_resend, req); /* before link_request() */
3243
3244                 WARN_ON(req->r_osd);
3245                 calc_target(osdc, &req->r_t, false);
3246                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3247                 link_request(osd, req);
3248                 if (!req->r_linger) {
3249                         if (!osd_homeless(osd) && !req->r_t.paused)
3250                                 send_request(req);
3251                 } else {
3252                         cancel_linger_request(req);
3253                 }
3254         }
3255
3256         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3257                 if (!osd_homeless(lreq->osd))
3258                         send_linger(lreq);
3259
3260                 list_del_init(&lreq->scan_item);
3261         }
3262 }
3263
3264 /*
3265  * Process updated osd map.
3266  *
3267  * The message contains any number of incremental and full maps, normally
3268  * indicating some sort of topology change in the cluster.  Kick requests
3269  * off to different OSDs as needed.
3270  */
3271 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3272 {
3273         void *p = msg->front.iov_base;
3274         void *const end = p + msg->front.iov_len;
3275         u32 nr_maps, maplen;
3276         u32 epoch;
3277         struct ceph_fsid fsid;
3278         struct rb_root need_resend = RB_ROOT;
3279         LIST_HEAD(need_resend_linger);
3280         bool handled_incremental = false;
3281         bool was_pauserd, was_pausewr;
3282         bool pauserd, pausewr;
3283         int err;
3284
3285         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3286         down_write(&osdc->lock);
3287
3288         /* verify fsid */
3289         ceph_decode_need(&p, end, sizeof(fsid), bad);
3290         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3291         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3292                 goto bad;
3293
3294         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3295         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3296                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3297                       have_pool_full(osdc);
3298
3299         /* incremental maps */
3300         ceph_decode_32_safe(&p, end, nr_maps, bad);
3301         dout(" %d inc maps\n", nr_maps);
3302         while (nr_maps > 0) {
3303                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3304                 epoch = ceph_decode_32(&p);
3305                 maplen = ceph_decode_32(&p);
3306                 ceph_decode_need(&p, end, maplen, bad);
3307                 if (osdc->osdmap->epoch &&
3308                     osdc->osdmap->epoch + 1 == epoch) {
3309                         dout("applying incremental map %u len %d\n",
3310                              epoch, maplen);
3311                         err = handle_one_map(osdc, p, p + maplen, true,
3312                                              &need_resend, &need_resend_linger);
3313                         if (err)
3314                                 goto bad;
3315                         handled_incremental = true;
3316                 } else {
3317                         dout("ignoring incremental map %u len %d\n",
3318                              epoch, maplen);
3319                 }
3320                 p += maplen;
3321                 nr_maps--;
3322         }
3323         if (handled_incremental)
3324                 goto done;
3325
3326         /* full maps */
3327         ceph_decode_32_safe(&p, end, nr_maps, bad);
3328         dout(" %d full maps\n", nr_maps);
3329         while (nr_maps) {
3330                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3331                 epoch = ceph_decode_32(&p);
3332                 maplen = ceph_decode_32(&p);
3333                 ceph_decode_need(&p, end, maplen, bad);
3334                 if (nr_maps > 1) {
3335                         dout("skipping non-latest full map %u len %d\n",
3336                              epoch, maplen);
3337                 } else if (osdc->osdmap->epoch >= epoch) {
3338                         dout("skipping full map %u len %d, older than our %u\n",
3339                              epoch, maplen,
3340                              osdc->osdmap->epoch);
3341                 } else {
3342                         dout("taking full map %u len %d\n", epoch, maplen);
3343                         err = handle_one_map(osdc, p, p + maplen, false,
3344                                              &need_resend, &need_resend_linger);
3345                         if (err)
3346                                 goto bad;
3347                 }
3348                 p += maplen;
3349                 nr_maps--;
3350         }
3351
3352 done:
3353         /*
3354          * subscribe to subsequent osdmap updates if full to ensure
3355          * we find out when we are no longer full and stop returning
3356          * ENOSPC.
3357          */
3358         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3359         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3360                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3361                   have_pool_full(osdc);
3362         if (was_pauserd || was_pausewr || pauserd || pausewr ||
3363             osdc->osdmap->epoch < osdc->epoch_barrier)
3364                 maybe_request_map(osdc);
3365
3366         kick_requests(osdc, &need_resend, &need_resend_linger);
3367
3368         ceph_osdc_abort_on_full(osdc);
3369         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3370                           osdc->osdmap->epoch);
3371         up_write(&osdc->lock);
3372         wake_up_all(&osdc->client->auth_wq);
3373         return;
3374
3375 bad:
3376         pr_err("osdc handle_map corrupt msg\n");
3377         ceph_msg_dump(msg);
3378         up_write(&osdc->lock);
3379 }
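
/*
 * For reference, the CEPH_MSG_OSD_MAP front as consumed above (a
 * sketch inferred from the decode calls):
 *
 *	ceph_fsid fsid
 *	le32 nr_inc_maps
 *	{ le32 epoch, le32 maplen, u8 data[maplen] } * nr_inc_maps
 *	le32 nr_full_maps
 *	{ le32 epoch, le32 maplen, u8 data[maplen] } * nr_full_maps
 *
 * Incrementals are applied only if they chain directly onto our
 * current epoch; a full map is taken only if it is the last one in
 * the message and newer than what we have.
 */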
3380
3381 /*
3382  * Resubmit requests pending on the given osd.
3383  */
3384 static void kick_osd_requests(struct ceph_osd *osd)
3385 {
3386         struct rb_node *n;
3387
3388         for (n = rb_first(&osd->o_requests); n; ) {
3389                 struct ceph_osd_request *req =
3390                     rb_entry(n, struct ceph_osd_request, r_node);
3391
3392                 n = rb_next(n); /* cancel_linger_request() */
3393
3394                 if (!req->r_linger) {
3395                         if (!req->r_t.paused)
3396                                 send_request(req);
3397                 } else {
3398                         cancel_linger_request(req);
3399                 }
3400         }
3401         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3402                 struct ceph_osd_linger_request *lreq =
3403                     rb_entry(n, struct ceph_osd_linger_request, node);
3404
3405                 send_linger(lreq);
3406         }
3407 }
3408
3409 /*
3410  * If the osd connection drops, we need to resubmit all requests.
3411  */
3412 static void osd_fault(struct ceph_connection *con)
3413 {
3414         struct ceph_osd *osd = con->private;
3415         struct ceph_osd_client *osdc = osd->o_osdc;
3416
3417         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3418
3419         down_write(&osdc->lock);
3420         if (!osd_registered(osd)) {
3421                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3422                 goto out_unlock;
3423         }
3424
3425         if (!reopen_osd(osd))
3426                 kick_osd_requests(osd);
3427         maybe_request_map(osdc);
3428
3429 out_unlock:
3430         up_write(&osdc->lock);
3431 }
3432
3433 /*
3434  * Process osd watch notifications
3435  */
3436 static void handle_watch_notify(struct ceph_osd_client *osdc,
3437                                 struct ceph_msg *msg)
3438 {
3439         void *p = msg->front.iov_base;
3440         void *const end = p + msg->front.iov_len;
3441         struct ceph_osd_linger_request *lreq;
3442         struct linger_work *lwork;
3443         u8 proto_ver, opcode;
3444         u64 cookie, notify_id;
3445         u64 notifier_id = 0;
3446         s32 return_code = 0;
3447         void *payload = NULL;
3448         u32 payload_len = 0;
3449
3450         ceph_decode_8_safe(&p, end, proto_ver, bad);
3451         ceph_decode_8_safe(&p, end, opcode, bad);
3452         ceph_decode_64_safe(&p, end, cookie, bad);
3453         p += 8; /* skip ver */
3454         ceph_decode_64_safe(&p, end, notify_id, bad);
3455
3456         if (proto_ver >= 1) {
3457                 ceph_decode_32_safe(&p, end, payload_len, bad);
3458                 ceph_decode_need(&p, end, payload_len, bad);
3459                 payload = p;
3460                 p += payload_len;
3461         }
3462
3463         if (le16_to_cpu(msg->hdr.version) >= 2)
3464                 ceph_decode_32_safe(&p, end, return_code, bad);
3465
3466         if (le16_to_cpu(msg->hdr.version) >= 3)
3467                 ceph_decode_64_safe(&p, end, notifier_id, bad);
3468
3469         down_read(&osdc->lock);
3470         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3471         if (!lreq) {
3472                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3473                      cookie);
3474                 goto out_unlock_osdc;
3475         }
3476
3477         mutex_lock(&lreq->lock);
3478         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3479              opcode, cookie, lreq, lreq->is_watch);
3480         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3481                 if (!lreq->last_error) {
3482                         lreq->last_error = -ENOTCONN;
3483                         queue_watch_error(lreq);
3484                 }
3485         } else if (!lreq->is_watch) {
3486                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3487                 if (lreq->notify_id && lreq->notify_id != notify_id) {
3488                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3489                              lreq->notify_id, notify_id);
3490                 } else if (!completion_done(&lreq->notify_finish_wait)) {
3491                         struct ceph_msg_data *data =
3492                             list_first_entry_or_null(&msg->data,
3493                                                      struct ceph_msg_data,
3494                                                      links);
3495
3496                         if (data) {
3497                                 if (lreq->preply_pages) {
3498                                         WARN_ON(data->type !=
3499                                                         CEPH_MSG_DATA_PAGES);
3500                                         *lreq->preply_pages = data->pages;
3501                                         *lreq->preply_len = data->length;
3502                                 } else {
3503                                         ceph_release_page_vector(data->pages,
3504                                                calc_pages_for(0, data->length));
3505                                 }
3506                         }
3507                         lreq->notify_finish_error = return_code;
3508                         complete_all(&lreq->notify_finish_wait);
3509                 }
3510         } else {
3511                 /* CEPH_WATCH_EVENT_NOTIFY */
3512                 lwork = lwork_alloc(lreq, do_watch_notify);
3513                 if (!lwork) {
3514                         pr_err("failed to allocate notify-lwork\n");
3515                         goto out_unlock_lreq;
3516                 }
3517
3518                 lwork->notify.notify_id = notify_id;
3519                 lwork->notify.notifier_id = notifier_id;
3520                 lwork->notify.payload = payload;
3521                 lwork->notify.payload_len = payload_len;
3522                 lwork->notify.msg = ceph_msg_get(msg);
3523                 lwork_queue(lwork);
3524         }
3525
3526 out_unlock_lreq:
3527         mutex_unlock(&lreq->lock);
3528 out_unlock_osdc:
3529         up_read(&osdc->lock);
3530         return;
3531
3532 bad:
3533         pr_err("osdc handle_watch_notify corrupt msg\n");
3534 }
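
/*
 * For reference, the CEPH_MSG_WATCH_NOTIFY front as consumed above (a
 * sketch inferred from the decode calls):
 *
 *	u8   proto_ver
 *	u8   opcode
 *	le64 cookie
 *	le64 ver		(skipped)
 *	le64 notify_id
 *	proto_ver >= 1:		le32 payload_len, u8 payload[payload_len]
 *	hdr.version >= 2:	le32 return_code
 *	hdr.version >= 3:	le64 notifier_id
 */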
3535
3536 /*
3537  * Register request, send initial attempt.
3538  */
3539 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3540                             struct ceph_osd_request *req,
3541                             bool nofail)
3542 {
3543         down_read(&osdc->lock);
3544         submit_request(req, false);
3545         up_read(&osdc->lock);
3546
3547         return 0;
3548 }
3549 EXPORT_SYMBOL(ceph_osdc_start_request);
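
/*
 * Typical submit-and-wait pattern, as used by the synchronous helpers
 * later in this file (a sketch; assumes @req has been set up and its
 * messages allocated with ceph_osdc_alloc_messages()).  Note that
 * @nofail is currently ignored and 0 is always returned:
 *
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	if (ret < 0)
 *		...		(canceled, timed out or failed)
 *	ceph_osdc_put_request(req);
 */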
3550
3551 /*
3552  * Unregister a registered request.  The request is not completed:
3553  * ->r_result isn't set and __complete_request() isn't called.
3554  */
3555 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3556 {
3557         struct ceph_osd_client *osdc = req->r_osdc;
3558
3559         down_write(&osdc->lock);
3560         if (req->r_osd)
3561                 cancel_request(req);
3562         up_write(&osdc->lock);
3563 }
3564 EXPORT_SYMBOL(ceph_osdc_cancel_request);
3565
3566 /*
3567  * @timeout: in jiffies, 0 means "wait forever"
3568  */
3569 static int wait_request_timeout(struct ceph_osd_request *req,
3570                                 unsigned long timeout)
3571 {
3572         long left;
3573
3574         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3575         left = wait_for_completion_killable_timeout(&req->r_completion,
3576                                                 ceph_timeout_jiffies(timeout));
3577         if (left <= 0) {
3578                 left = left ?: -ETIMEDOUT;
3579                 ceph_osdc_cancel_request(req);
3580         } else {
3581                 left = req->r_result; /* completed */
3582         }
3583
3584         return left;
3585 }
3586
3587 /*
3588  * wait for a request to complete
3589  */
3590 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3591                            struct ceph_osd_request *req)
3592 {
3593         return wait_request_timeout(req, 0);
3594 }
3595 EXPORT_SYMBOL(ceph_osdc_wait_request);
3596
3597 /*
3598  * sync - wait for all in-flight write requests to flush; sample last_tid first to avoid starvation.
3599  */
3600 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3601 {
3602         struct rb_node *n, *p;
3603         u64 last_tid = atomic64_read(&osdc->last_tid);
3604
3605 again:
3606         down_read(&osdc->lock);
3607         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3608                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3609
3610                 mutex_lock(&osd->lock);
3611                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3612                         struct ceph_osd_request *req =
3613                             rb_entry(p, struct ceph_osd_request, r_node);
3614
3615                         if (req->r_tid > last_tid)
3616                                 break;
3617
3618                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3619                                 continue;
3620
3621                         ceph_osdc_get_request(req);
3622                         mutex_unlock(&osd->lock);
3623                         up_read(&osdc->lock);
3624                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
3625                              __func__, req, req->r_tid, last_tid);
3626                         wait_for_completion(&req->r_completion);
3627                         ceph_osdc_put_request(req);
3628                         goto again;
3629                 }
3630
3631                 mutex_unlock(&osd->lock);
3632         }
3633
3634         up_read(&osdc->lock);
3635         dout("%s done last_tid %llu\n", __func__, last_tid);
3636 }
3637 EXPORT_SYMBOL(ceph_osdc_sync);
3638
3639 static struct ceph_osd_request *
3640 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3641 {
3642         struct ceph_osd_request *req;
3643
3644         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3645         if (!req)
3646                 return NULL;
3647
3648         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3649         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3650
3651         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3652                 ceph_osdc_put_request(req);
3653                 return NULL;
3654         }
3655
3656         return req;
3657 }
3658
3659 /*
3660  * Returns a handle, caller owns a ref.
3661  */
3662 struct ceph_osd_linger_request *
3663 ceph_osdc_watch(struct ceph_osd_client *osdc,
3664                 struct ceph_object_id *oid,
3665                 struct ceph_object_locator *oloc,
3666                 rados_watchcb2_t wcb,
3667                 rados_watcherrcb_t errcb,
3668                 void *data)
3669 {
3670         struct ceph_osd_linger_request *lreq;
3671         int ret;
3672
3673         lreq = linger_alloc(osdc);
3674         if (!lreq)
3675                 return ERR_PTR(-ENOMEM);
3676
3677         lreq->is_watch = true;
3678         lreq->wcb = wcb;
3679         lreq->errcb = errcb;
3680         lreq->data = data;
3681         lreq->watch_valid_thru = jiffies;
3682
3683         ceph_oid_copy(&lreq->t.base_oid, oid);
3684         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3685         lreq->t.flags = CEPH_OSD_FLAG_WRITE;
3686         ktime_get_real_ts(&lreq->mtime);
3687
3688         lreq->reg_req = alloc_linger_request(lreq);
3689         if (!lreq->reg_req) {
3690                 ret = -ENOMEM;
3691                 goto err_put_lreq;
3692         }
3693
3694         lreq->ping_req = alloc_linger_request(lreq);
3695         if (!lreq->ping_req) {
3696                 ret = -ENOMEM;
3697                 goto err_put_lreq;
3698         }
3699
3700         down_write(&osdc->lock);
3701         linger_register(lreq); /* before osd_req_op_* */
3702         osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3703                               CEPH_OSD_WATCH_OP_WATCH);
3704         osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3705                               CEPH_OSD_WATCH_OP_PING);
3706         linger_submit(lreq);
3707         up_write(&osdc->lock);
3708
3709         ret = linger_reg_commit_wait(lreq);
3710         if (ret) {
3711                 linger_cancel(lreq);
3712                 goto err_put_lreq;
3713         }
3714
3715         return lreq;
3716
3717 err_put_lreq:
3718         linger_put(lreq);
3719         return ERR_PTR(ret);
3720 }
3721 EXPORT_SYMBOL(ceph_osdc_watch);
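
/*
 * Illustrative pairing with ceph_osdc_unwatch() (a sketch - my_wcb,
 * my_errcb and the surrounding driver context are hypothetical):
 *
 *	lreq = ceph_osdc_watch(osdc, oid, oloc, my_wcb, my_errcb, data);
 *	if (IS_ERR(lreq))
 *		return PTR_ERR(lreq);
 *	...			(watch established, callbacks may fire)
 *	ret = ceph_osdc_unwatch(osdc, lreq);	(drops the watch ref)
 */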
3722
3723 /*
3724  * Releases a ref.
3725  *
3726  * Times out after mount_timeout to preserve rbd unmap behaviour
3727  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3728  * with mount_timeout").
3729  */
3730 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3731                       struct ceph_osd_linger_request *lreq)
3732 {
3733         struct ceph_options *opts = osdc->client->options;
3734         struct ceph_osd_request *req;
3735         int ret;
3736
3737         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3738         if (!req)
3739                 return -ENOMEM;
3740
3741         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3742         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3743         req->r_flags = CEPH_OSD_FLAG_WRITE;
3744         ktime_get_real_ts(&req->r_mtime);
3745         osd_req_op_watch_init(req, 0, lreq->linger_id,
3746                               CEPH_OSD_WATCH_OP_UNWATCH);
3747
3748         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3749         if (ret)
3750                 goto out_put_req;
3751
3752         ceph_osdc_start_request(osdc, req, false);
3753         linger_cancel(lreq);
3754         linger_put(lreq);
3755         ret = wait_request_timeout(req, opts->mount_timeout);
3756
3757 out_put_req:
3758         ceph_osdc_put_request(req);
3759         return ret;
3760 }
3761 EXPORT_SYMBOL(ceph_osdc_unwatch);
3762
3763 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3764                                       u64 notify_id, u64 cookie, void *payload,
3765                                       size_t payload_len)
3766 {
3767         struct ceph_osd_req_op *op;
3768         struct ceph_pagelist *pl;
3769         int ret;
3770
3771         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3772
3773         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3774         if (!pl)
3775                 return -ENOMEM;
3776
3777         ceph_pagelist_init(pl);
3778         ret = ceph_pagelist_encode_64(pl, notify_id);
3779         ret |= ceph_pagelist_encode_64(pl, cookie);
3780         if (payload) {
3781                 ret |= ceph_pagelist_encode_32(pl, payload_len);
3782                 ret |= ceph_pagelist_append(pl, payload, payload_len);
3783         } else {
3784                 ret |= ceph_pagelist_encode_32(pl, 0);
3785         }
3786         if (ret) {
3787                 ceph_pagelist_release(pl);
3788                 return -ENOMEM;
3789         }
3790
3791         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3792         op->indata_len = pl->length;
3793         return 0;
3794 }
3795
3796 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3797                          struct ceph_object_id *oid,
3798                          struct ceph_object_locator *oloc,
3799                          u64 notify_id,
3800                          u64 cookie,
3801                          void *payload,
3802                          size_t payload_len)
3803 {
3804         struct ceph_osd_request *req;
3805         int ret;
3806
3807         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3808         if (!req)
3809                 return -ENOMEM;
3810
3811         ceph_oid_copy(&req->r_base_oid, oid);
3812         ceph_oloc_copy(&req->r_base_oloc, oloc);
3813         req->r_flags = CEPH_OSD_FLAG_READ;
3814
3815         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3816         if (ret)
3817                 goto out_put_req;
3818
3819         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3820                                          payload_len);
3821         if (ret)
3822                 goto out_put_req;
3823
3824         ceph_osdc_start_request(osdc, req, false);
3825         ret = ceph_osdc_wait_request(osdc, req);
3826
3827 out_put_req:
3828         ceph_osdc_put_request(req);
3829         return ret;
3830 }
3831 EXPORT_SYMBOL(ceph_osdc_notify_ack);
3832
3833 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3834                                   u64 cookie, u32 prot_ver, u32 timeout,
3835                                   void *payload, size_t payload_len)
3836 {
3837         struct ceph_osd_req_op *op;
3838         struct ceph_pagelist *pl;
3839         int ret;
3840
3841         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3842         op->notify.cookie = cookie;
3843
3844         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3845         if (!pl)
3846                 return -ENOMEM;
3847
3848         ceph_pagelist_init(pl);
3849         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3850         ret |= ceph_pagelist_encode_32(pl, timeout);
3851         ret |= ceph_pagelist_encode_32(pl, payload_len);
3852         ret |= ceph_pagelist_append(pl, payload, payload_len);
3853         if (ret) {
3854                 ceph_pagelist_release(pl);
3855                 return -ENOMEM;
3856         }
3857
3858         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3859         op->indata_len = pl->length;
3860         return 0;
3861 }
3862
3863 /*
3864  * @timeout: in seconds
3865  *
3866  * @preply_{pages,len} are initialized both on success and error.
3867  * The caller is responsible for:
3868  *
3869  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3870  */
3871 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3872                      struct ceph_object_id *oid,
3873                      struct ceph_object_locator *oloc,
3874                      void *payload,
3875                      size_t payload_len,
3876                      u32 timeout,
3877                      struct page ***preply_pages,
3878                      size_t *preply_len)
3879 {
3880         struct ceph_osd_linger_request *lreq;
3881         struct page **pages;
3882         int ret;
3883
3884         WARN_ON(!timeout);
3885         if (preply_pages) {
3886                 *preply_pages = NULL;
3887                 *preply_len = 0;
3888         }
3889
3890         lreq = linger_alloc(osdc);
3891         if (!lreq)
3892                 return -ENOMEM;
3893
3894         lreq->preply_pages = preply_pages;
3895         lreq->preply_len = preply_len;
3896
3897         ceph_oid_copy(&lreq->t.base_oid, oid);
3898         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3899         lreq->t.flags = CEPH_OSD_FLAG_READ;
3900
3901         lreq->reg_req = alloc_linger_request(lreq);
3902         if (!lreq->reg_req) {
3903                 ret = -ENOMEM;
3904                 goto out_put_lreq;
3905         }
3906
3907         /* for notify_id */
3908         pages = ceph_alloc_page_vector(1, GFP_NOIO);
3909         if (IS_ERR(pages)) {
3910                 ret = PTR_ERR(pages);
3911                 goto out_put_lreq;
3912         }
3913
3914         down_write(&osdc->lock);
3915         linger_register(lreq); /* before osd_req_op_* */
3916         ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3917                                      timeout, payload, payload_len);
3918         if (ret) {
3919                 linger_unregister(lreq);
3920                 up_write(&osdc->lock);
3921                 ceph_release_page_vector(pages, 1);
3922                 goto out_put_lreq;
3923         }
3924         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3925                                                  response_data),
3926                                  pages, PAGE_SIZE, 0, false, true);
3927         linger_submit(lreq);
3928         up_write(&osdc->lock);
3929
3930         ret = linger_reg_commit_wait(lreq);
3931         if (!ret)
3932                 ret = linger_notify_finish_wait(lreq);
3933         else
3934                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3935
3936         linger_cancel(lreq);
3937 out_put_lreq:
3938         linger_put(lreq);
3939         return ret;
3940 }
3941 EXPORT_SYMBOL(ceph_osdc_notify);
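
/*
 * Illustrative call (a sketch): a 5 second notify that collects the
 * reply.  Per the comment above, the reply pages must be released by
 * the caller on both success and error:
 *
 *	struct page **reply_pages = NULL;
 *	size_t reply_len = 0;
 *
 *	ret = ceph_osdc_notify(osdc, oid, oloc, payload, payload_len,
 *			       5, &reply_pages, &reply_len);
 *	...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */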
3942
3943 /*
3944  * Return the number of milliseconds since the watch was last
3945  * confirmed, or an error.  If there is an error, the watch is no
3946  * longer valid, and should be destroyed with ceph_osdc_unwatch().
3947  */
3948 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
3949                           struct ceph_osd_linger_request *lreq)
3950 {
3951         unsigned long stamp, age;
3952         int ret;
3953
3954         down_read(&osdc->lock);
3955         mutex_lock(&lreq->lock);
3956         stamp = lreq->watch_valid_thru;
3957         if (!list_empty(&lreq->pending_lworks)) {
3958                 struct linger_work *lwork =
3959                     list_first_entry(&lreq->pending_lworks,
3960                                      struct linger_work,
3961                                      pending_item);
3962
3963                 if (time_before(lwork->queued_stamp, stamp))
3964                         stamp = lwork->queued_stamp;
3965         }
3966         age = jiffies - stamp;
3967         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
3968              lreq, lreq->linger_id, age, lreq->last_error);
3969         /* we are truncating to msecs, so return a safe upper bound */
3970         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
3971
3972         mutex_unlock(&lreq->lock);
3973         up_read(&osdc->lock);
3974         return ret;
3975 }
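
/*
 * Illustrative check (a sketch; the 30 second threshold is a made-up
 * caller policy):
 *
 *	ret = ceph_osdc_watch_check(osdc, lreq);
 *	if (ret < 0 || ret > 30 * MSEC_PER_SEC)
 *		...		(watch is broken or stale - unwatch,
 *				 then re-watch)
 */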
3976
3977 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
3978 {
3979         u8 struct_v;
3980         u32 struct_len;
3981         int ret;
3982
3983         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
3984                                   &struct_v, &struct_len);
3985         if (ret)
3986                 return ret;
3987
3988         ceph_decode_copy(p, &item->name, sizeof(item->name));
3989         item->cookie = ceph_decode_64(p);
3990         *p += 4; /* skip timeout_seconds */
3991         if (struct_v >= 2) {
3992                 ceph_decode_copy(p, &item->addr, sizeof(item->addr));
3993                 ceph_decode_addr(&item->addr);
3994         }
3995
3996         dout("%s %s%llu cookie %llu addr %s\n", __func__,
3997              ENTITY_NAME(item->name), item->cookie,
3998              ceph_pr_addr(&item->addr.in_addr));
3999         return 0;
4000 }
4001
4002 static int decode_watchers(void **p, void *end,
4003                            struct ceph_watch_item **watchers,
4004                            u32 *num_watchers)
4005 {
4006         u8 struct_v;
4007         u32 struct_len;
4008         int i;
4009         int ret;
4010
4011         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4012                                   &struct_v, &struct_len);
4013         if (ret)
4014                 return ret;
4015
4016         *num_watchers = ceph_decode_32(p);
4017         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4018         if (!*watchers)
4019                 return -ENOMEM;
4020
4021         for (i = 0; i < *num_watchers; i++) {
4022                 ret = decode_watcher(p, end, *watchers + i);
4023                 if (ret) {
4024                         kfree(*watchers);
4025                         return ret;
4026                 }
4027         }
4028
4029         return 0;
4030 }
4031
4032 /*
4033  * On success, the caller is responsible for:
4034  *
4035  *     kfree(watchers);
4036  */
4037 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4038                             struct ceph_object_id *oid,
4039                             struct ceph_object_locator *oloc,
4040                             struct ceph_watch_item **watchers,
4041                             u32 *num_watchers)
4042 {
4043         struct ceph_osd_request *req;
4044         struct page **pages;
4045         int ret;
4046
4047         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4048         if (!req)
4049                 return -ENOMEM;
4050
4051         ceph_oid_copy(&req->r_base_oid, oid);
4052         ceph_oloc_copy(&req->r_base_oloc, oloc);
4053         req->r_flags = CEPH_OSD_FLAG_READ;
4054
4055         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4056         if (ret)
4057                 goto out_put_req;
4058
4059         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4060         if (IS_ERR(pages)) {
4061                 ret = PTR_ERR(pages);
4062                 goto out_put_req;
4063         }
4064
4065         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4066         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4067                                                  response_data),
4068                                  pages, PAGE_SIZE, 0, false, true);
4069
4070         ceph_osdc_start_request(osdc, req, false);
4071         ret = ceph_osdc_wait_request(osdc, req);
4072         if (ret >= 0) {
4073                 void *p = page_address(pages[0]);
4074                 void *const end = p + req->r_ops[0].outdata_len;
4075
4076                 ret = decode_watchers(&p, end, watchers, num_watchers);
4077         }
4078
4079 out_put_req:
4080         ceph_osdc_put_request(req);
4081         return ret;
4082 }
4083 EXPORT_SYMBOL(ceph_osdc_list_watchers);
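
/*
 * Illustrative use (a sketch):
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers, i;
 *
 *	ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
 *				      &num_watchers);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < num_watchers; i++)
 *		...		(watchers[i].name, .cookie, .addr)
 *	kfree(watchers);
 */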
4084
4085 /*
4086  * Call all pending notify callbacks - for use after a watch is
4087  * unregistered, to make sure no more callbacks for it will be invoked
4088  */
4089 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4090 {
4091         dout("%s osdc %p\n", __func__, osdc);
4092         flush_workqueue(osdc->notify_wq);
4093 }
4094 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4095
4096 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4097 {
4098         down_read(&osdc->lock);
4099         maybe_request_map(osdc);
4100         up_read(&osdc->lock);
4101 }
4102 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4103
4104 /*
4105  * Execute an OSD class method on an object.
4106  *
4107  * @flags: CEPH_OSD_FLAG_*
4108  * @resp_len: in/out param for reply length
4109  */
4110 int ceph_osdc_call(struct ceph_osd_client *osdc,
4111                    struct ceph_object_id *oid,
4112                    struct ceph_object_locator *oloc,
4113                    const char *class, const char *method,
4114                    unsigned int flags,
4115                    struct page *req_page, size_t req_len,
4116                    struct page *resp_page, size_t *resp_len)
4117 {
4118         struct ceph_osd_request *req;
4119         int ret;
4120
4121         if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4122                 return -E2BIG;
4123
4124         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4125         if (!req)
4126                 return -ENOMEM;
4127
4128         ceph_oid_copy(&req->r_base_oid, oid);
4129         ceph_oloc_copy(&req->r_base_oloc, oloc);
4130         req->r_flags = flags;
4131
4132         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4133         if (ret)
4134                 goto out_put_req;
4135
4136         osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4137         if (req_page)
4138                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4139                                                   0, false, false);
4140         if (resp_page)
4141                 osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4142                                                    *resp_len, 0, false, false);
4143
4144         ceph_osdc_start_request(osdc, req, false);
4145         ret = ceph_osdc_wait_request(osdc, req);
4146         if (ret >= 0) {
4147                 ret = req->r_ops[0].rval;
4148                 if (resp_page)
4149                         *resp_len = req->r_ops[0].outdata_len;
4150         }
4151
4152 out_put_req:
4153         ceph_osdc_put_request(req);
4154         return ret;
4155 }
4156 EXPORT_SYMBOL(ceph_osdc_call);
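
/*
 * Illustrative class method call (a sketch; "rbd"/"get_size" are just
 * example class/method names and @req_page holds the encoded input):
 *
 *	size_t reply_len = PAGE_SIZE;
 *
 *	ret = ceph_osdc_call(osdc, oid, oloc, "rbd", "get_size",
 *			     CEPH_OSD_FLAG_READ, req_page, req_len,
 *			     reply_page, &reply_len);
 *	if (ret >= 0)
 *		...		(first reply_len bytes of reply_page
 *				 are valid)
 */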
4157
4158 /*
4159  * init, shutdown
4160  */
4161 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4162 {
4163         int err;
4164
4165         dout("init\n");
4166         osdc->client = client;
4167         init_rwsem(&osdc->lock);
4168         osdc->osds = RB_ROOT;
4169         INIT_LIST_HEAD(&osdc->osd_lru);
4170         spin_lock_init(&osdc->osd_lru_lock);
4171         osd_init(&osdc->homeless_osd);
4172         osdc->homeless_osd.o_osdc = osdc;
4173         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4174         osdc->last_linger_id = CEPH_LINGER_ID_START;
4175         osdc->linger_requests = RB_ROOT;
4176         osdc->map_checks = RB_ROOT;
4177         osdc->linger_map_checks = RB_ROOT;
4178         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4179         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4180
4181         err = -ENOMEM;
4182         osdc->osdmap = ceph_osdmap_alloc();
4183         if (!osdc->osdmap)
4184                 goto out;
4185
4186         osdc->req_mempool = mempool_create_slab_pool(10,
4187                                                      ceph_osd_request_cache);
4188         if (!osdc->req_mempool)
4189                 goto out_map;
4190
4191         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4192                                 PAGE_SIZE, 10, true, "osd_op");
4193         if (err < 0)
4194                 goto out_mempool;
4195         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4196                                 PAGE_SIZE, 10, true, "osd_op_reply");
4197         if (err < 0)
4198                 goto out_msgpool;
4199
4200         err = -ENOMEM;
4201         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4202         if (!osdc->notify_wq)
4203                 goto out_msgpool_reply;
4204
4205         schedule_delayed_work(&osdc->timeout_work,
4206                               osdc->client->options->osd_keepalive_timeout);
4207         schedule_delayed_work(&osdc->osds_timeout_work,
4208             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4209
4210         return 0;
4211
4212 out_msgpool_reply:
4213         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4214 out_msgpool:
4215         ceph_msgpool_destroy(&osdc->msgpool_op);
4216 out_mempool:
4217         mempool_destroy(osdc->req_mempool);
4218 out_map:
4219         ceph_osdmap_destroy(osdc->osdmap);
4220 out:
4221         return err;
4222 }
4223
4224 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4225 {
4226         flush_workqueue(osdc->notify_wq);
4227         destroy_workqueue(osdc->notify_wq);
4228         cancel_delayed_work_sync(&osdc->timeout_work);
4229         cancel_delayed_work_sync(&osdc->osds_timeout_work);
4230
4231         down_write(&osdc->lock);
4232         while (!RB_EMPTY_ROOT(&osdc->osds)) {
4233                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4234                                                 struct ceph_osd, o_node);
4235                 close_osd(osd);
4236         }
4237         up_write(&osdc->lock);
4238         WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
4239         osd_cleanup(&osdc->homeless_osd);
4240
4241         WARN_ON(!list_empty(&osdc->osd_lru));
4242         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4243         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4244         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4245         WARN_ON(atomic_read(&osdc->num_requests));
4246         WARN_ON(atomic_read(&osdc->num_homeless));
4247
4248         ceph_osdmap_destroy(osdc->osdmap);
4249         mempool_destroy(osdc->req_mempool);
4250         ceph_msgpool_destroy(&osdc->msgpool_op);
4251         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4252 }
4253
4254 /*
4255  * Read some contiguous pages.  If we cross a stripe boundary, shorten
4256  * *plen.  Return number of bytes read, or error.
4257  */
4258 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4259                         struct ceph_vino vino, struct ceph_file_layout *layout,
4260                         u64 off, u64 *plen,
4261                         u32 truncate_seq, u64 truncate_size,
4262                         struct page **pages, int num_pages, int page_align)
4263 {
4264         struct ceph_osd_request *req;
4265         int rc = 0;
4266
4267         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
4268              vino.snap, off, *plen);
4269         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
4270                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
4271                                     NULL, truncate_seq, truncate_size,
4272                                     false);
4273         if (IS_ERR(req))
4274                 return PTR_ERR(req);
4275
4276         /* it may be a short read due to an object boundary */
4277         osd_req_op_extent_osd_data_pages(req, 0,
4278                                 pages, *plen, page_align, false, false);
4279
4280         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
4281              off, *plen, *plen, page_align);
4282
4283         rc = ceph_osdc_start_request(osdc, req, false);
4284         if (!rc)
4285                 rc = ceph_osdc_wait_request(osdc, req);
4286
4287         ceph_osdc_put_request(req);
4288         dout("readpages result %d\n", rc);
4289         return rc;
4290 }
4291 EXPORT_SYMBOL(ceph_osdc_readpages);
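
/*
 * Illustrative read (a sketch; @vino and @layout come from the
 * caller's inode state, and @len may have been shortened to an object
 * boundary on return):
 *
 *	int num_pages = calc_pages_for(0, len);
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *				 truncate_seq, truncate_size, pages,
 *				 num_pages, 0);
 *	if (rc >= 0)
 *		...		(rc bytes landed in pages)
 */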
4292
4293 /*
4294  * do a synchronous write on N pages
4295  */
4296 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
4297                          struct ceph_file_layout *layout,
4298                          struct ceph_snap_context *snapc,
4299                          u64 off, u64 len,
4300                          u32 truncate_seq, u64 truncate_size,
4301                          struct timespec *mtime,
4302                          struct page **pages, int num_pages)
4303 {
4304         struct ceph_osd_request *req;
4305         int rc = 0;
4306         int page_align = off & ~PAGE_MASK;
4307
4308         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
4309                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
4310                                     snapc, truncate_seq, truncate_size,
4311                                     true);
4312         if (IS_ERR(req))
4313                 return PTR_ERR(req);
4314
4315         /* it may be a short write due to an object boundary */
4316         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
4317                                 false, false);
4318         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
4319
4320         req->r_mtime = *mtime;
4321         rc = ceph_osdc_start_request(osdc, req, true);
4322         if (!rc)
4323                 rc = ceph_osdc_wait_request(osdc, req);
4324
4325         ceph_osdc_put_request(req);
4326         if (rc == 0)
4327                 rc = len;
4328         dout("writepages result %d\n", rc);
4329         return rc;
4330 }
4331 EXPORT_SYMBOL(ceph_osdc_writepages);
4332
4333 int ceph_osdc_setup(void)
4334 {
4335         size_t size = sizeof(struct ceph_osd_request) +
4336             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
4337
4338         BUG_ON(ceph_osd_request_cache);
4339         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
4340                                                    0, 0, NULL);
4341
4342         return ceph_osd_request_cache ? 0 : -ENOMEM;
4343 }
4344 EXPORT_SYMBOL(ceph_osdc_setup);
4345
4346 void ceph_osdc_cleanup(void)
4347 {
4348         BUG_ON(!ceph_osd_request_cache);
4349         kmem_cache_destroy(ceph_osd_request_cache);
4350         ceph_osd_request_cache = NULL;
4351 }
4352 EXPORT_SYMBOL(ceph_osdc_cleanup);
4353
4354 /*
4355  * handle incoming message
4356  */
4357 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4358 {
4359         struct ceph_osd *osd = con->private;
4360         struct ceph_osd_client *osdc = osd->o_osdc;
4361         int type = le16_to_cpu(msg->hdr.type);
4362
4363         switch (type) {
4364         case CEPH_MSG_OSD_MAP:
4365                 ceph_osdc_handle_map(osdc, msg);
4366                 break;
4367         case CEPH_MSG_OSD_OPREPLY:
4368                 handle_reply(osd, msg);
4369                 break;
4370         case CEPH_MSG_WATCH_NOTIFY:
4371                 handle_watch_notify(osdc, msg);
4372                 break;
4373
4374         default:
4375                 pr_err("received unknown message type %d %s\n", type,
4376                        ceph_msg_type_name(type));
4377         }
4378
4379         ceph_msg_put(msg);
4380 }
4381
4382 /*
4383  * Look up and return the message for an incoming reply.  Don't try to
4384  * handle a data portion larger than preallocated at the moment - for
4385  * now, just skip the whole message.
4386  */
4387 static struct ceph_msg *get_reply(struct ceph_connection *con,
4388                                   struct ceph_msg_header *hdr,
4389                                   int *skip)
4390 {
4391         struct ceph_osd *osd = con->private;
4392         struct ceph_osd_client *osdc = osd->o_osdc;
4393         struct ceph_msg *m = NULL;
4394         struct ceph_osd_request *req;
4395         int front_len = le32_to_cpu(hdr->front_len);
4396         int data_len = le32_to_cpu(hdr->data_len);
4397         u64 tid = le64_to_cpu(hdr->tid);
4398
4399         down_read(&osdc->lock);
4400         if (!osd_registered(osd)) {
4401                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
4402                 *skip = 1;
4403                 goto out_unlock_osdc;
4404         }
4405         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
4406
4407         mutex_lock(&osd->lock);
4408         req = lookup_request(&osd->o_requests, tid);
4409         if (!req) {
4410                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
4411                      osd->o_osd, tid);
4412                 *skip = 1;
4413                 goto out_unlock_session;
4414         }
4415
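        /*
         * Take the preallocated reply back from the messenger in case
         * it is still being read into (e.g. across a connection reset),
         * so that it is safe to hand the same message out again below.
         */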
4416         ceph_msg_revoke_incoming(req->r_reply);
4417
4418         if (front_len > req->r_reply->front_alloc_len) {
4419                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
4420                         __func__, osd->o_osd, req->r_tid, front_len,
4421                         req->r_reply->front_alloc_len);
4422                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
4423                                  false);
4424                 if (!m)
4425                         goto out_unlock_session;
4426                 ceph_msg_put(req->r_reply);
4427                 req->r_reply = m;
4428         }
4429
4430         if (data_len > req->r_reply->data_length) {
4431                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
4432                         __func__, osd->o_osd, req->r_tid, data_len,
4433                         req->r_reply->data_length);
4434                 m = NULL;
4435                 *skip = 1;
4436                 goto out_unlock_session;
4437         }
4438
4439         m = ceph_msg_get(req->r_reply);
4440         dout("get_reply tid %lld %p\n", tid, m);
4441
4442 out_unlock_session:
4443         mutex_unlock(&osd->lock);
4444 out_unlock_osdc:
4445         up_read(&osdc->lock);
4446         return m;
4447 }
4448
4449 /*
4450  * TODO: switch to a msg-owned pagelist
4451  */
4452 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4453 {
4454         struct ceph_msg *m;
4455         int type = le16_to_cpu(hdr->type);
4456         u32 front_len = le32_to_cpu(hdr->front_len);
4457         u32 data_len = le32_to_cpu(hdr->data_len);
4458
4459         m = ceph_msg_new(type, front_len, GFP_NOIO, false);
4460         if (!m)
4461                 return NULL;
4462
4463         if (data_len) {
4464                 struct page **pages;
4465                 struct ceph_osd_data osd_data;
4466
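                /*
                 * calc_pages_for(0, data_len) is the number of pages
                 * needed to hold data_len bytes starting at page
                 * offset 0: with 4K pages, 1..4096 bytes need one
                 * page, 4097 bytes need two, and so on.
                 */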
4467                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4468                                                GFP_NOIO);
4469                 if (IS_ERR(pages)) {
4470                         ceph_msg_put(m);
4471                         return NULL;
4472                 }
4473
4474                 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
4475                                          false);
4476                 ceph_osdc_msg_data_add(m, &osd_data);
4477         }
4478
4479         return m;
4480 }
4481
4482 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
4483                                   struct ceph_msg_header *hdr,
4484                                   int *skip)
4485 {
4486         struct ceph_osd *osd = con->private;
4487         int type = le16_to_cpu(hdr->type);
4488
4489         *skip = 0;
4490         switch (type) {
4491         case CEPH_MSG_OSD_MAP:
4492         case CEPH_MSG_WATCH_NOTIFY:
4493                 return alloc_msg_with_page_vector(hdr);
4494         case CEPH_MSG_OSD_OPREPLY:
4495                 return get_reply(con, hdr, skip);
4496         default:
4497                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
4498                         osd->o_osd, type);
4499                 *skip = 1;
4500                 return NULL;
4501         }
4502 }
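
/*
 * Contract with the messenger: returning a message means we will accept
 * the incoming data; returning NULL with *skip set asks the messenger to
 * read and discard the message; returning NULL with *skip clear is
 * treated as a memory allocation failure.
 */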
4503
4504 /*
4505  * Wrappers to take/drop a reference on the containing ceph_osd
4506  */
4507 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
4508 {
4509         struct ceph_osd *osd = con->private;

4510         if (get_osd(osd))
4511                 return con;
4512         return NULL;
4513 }
4514
4515 static void put_osd_con(struct ceph_connection *con)
4516 {
4517         struct ceph_osd *osd = con->private;

4518         put_osd(osd);
4519 }
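
/*
 * get_osd() takes a reference only if the refcount has not already
 * dropped to zero, so get_osd_con() returning NULL tells the messenger
 * that the owning ceph_osd is going away and the connection must not
 * be used any further.
 */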
4520
4521 /*
4522  * authentication
4523  */
4524 /*
4525  * Note: returned pointer is the address of a structure that's
4526  * managed separately.  Caller must *not* attempt to free it.
4527  */
4528 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4529                                         int *proto, int force_new)
4530 {
4531         struct ceph_osd *o = con->private;
4532         struct ceph_osd_client *osdc = o->o_osdc;
4533         struct ceph_auth_client *ac = osdc->client->monc.auth;
4534         struct ceph_auth_handshake *auth = &o->o_auth;
4535
4536         if (force_new && auth->authorizer) {
4537                 ceph_auth_destroy_authorizer(auth->authorizer);
4538                 auth->authorizer = NULL;
4539         }
4540         if (!auth->authorizer) {
4541                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4542                                                       auth);
4543                 if (ret)
4544                         return ERR_PTR(ret);
4545         } else {
4546                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4547                                                       auth);
4548                 if (ret)
4549                         return ERR_PTR(ret);
4550         }
4551         *proto = ac->protocol;
4552
4553         return auth;
4554 }
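
/*
 * force_new is set by the messenger after the peer has rejected our
 * authorizer (e.g. an expired cephx ticket): the stale authorizer is
 * destroyed and a fresh one is built from the current credentials.
 */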
4555
4557 static int verify_authorizer_reply(struct ceph_connection *con)
4558 {
4559         struct ceph_osd *o = con->private;
4560         struct ceph_osd_client *osdc = o->o_osdc;
4561         struct ceph_auth_client *ac = osdc->client->monc.auth;
4562
4563         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
4564 }
4565
4566 static int invalidate_authorizer(struct ceph_connection *con)
4567 {
4568         struct ceph_osd *o = con->private;
4569         struct ceph_osd_client *osdc = o->o_osdc;
4570         struct ceph_auth_client *ac = osdc->client->monc.auth;
4571
4572         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4573         return ceph_monc_validate_auth(&osdc->client->monc);
4574 }
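
/*
 * Drop the cached OSD service ticket so that a fresh one is requested
 * from the monitors; ceph_monc_validate_auth() then kicks the monitor
 * client in case the monitor session itself needs re-authentication.
 */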
4575
4576 static int osd_sign_message(struct ceph_msg *msg)
4577 {
4578         struct ceph_osd *o = msg->con->private;
4579         struct ceph_auth_handshake *auth = &o->o_auth;
4580
4581         return ceph_auth_sign_message(auth, msg);
4582 }
4583
4584 static int osd_check_message_signature(struct ceph_msg *msg)
4585 {
4586         struct ceph_osd *o = msg->con->private;
4587         struct ceph_auth_handshake *auth = &o->o_auth;
4588
4589         return ceph_auth_check_message_signature(auth, msg);
4590 }
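
/*
 * Both hooks defer to the sign_message/check_message_signature methods
 * of the handshake when the negotiated auth protocol provides them
 * (cephx with message signing enabled) and are no-ops otherwise.
 */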
4591
4592 static const struct ceph_connection_operations osd_con_ops = {
4593         .get = get_osd_con,
4594         .put = put_osd_con,
4595         .dispatch = dispatch,
4596         .get_authorizer = get_authorizer,
4597         .verify_authorizer_reply = verify_authorizer_reply,
4598         .invalidate_authorizer = invalidate_authorizer,
4599         .alloc_msg = alloc_msg,
4600         .sign_message = osd_sign_message,
4601         .check_message_signature = osd_check_message_signature,
4602         .fault = osd_fault,
4603 };
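
/*
 * These ops are installed on each OSD session's connection via
 * ceph_con_init() when the ceph_osd is created.
 */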