Commit | Line | Data |
---|---|---|
a4ce40a9 | 1 | |
3d14c5d2 | 2 | #include <linux/ceph/ceph_debug.h> |
f24e9980 | 3 | |
3d14c5d2 | 4 | #include <linux/module.h> |
f24e9980 SW |
5 | #include <linux/err.h> |
6 | #include <linux/highmem.h> | |
7 | #include <linux/mm.h> | |
8 | #include <linux/pagemap.h> | |
9 | #include <linux/slab.h> | |
10 | #include <linux/uaccess.h> | |
68b4476b YS |
11 | #ifdef CONFIG_BLOCK |
12 | #include <linux/bio.h> | |
13 | #endif | |
f24e9980 | 14 | |
3d14c5d2 YS |
15 | #include <linux/ceph/libceph.h> |
16 | #include <linux/ceph/osd_client.h> | |
17 | #include <linux/ceph/messenger.h> | |
18 | #include <linux/ceph/decode.h> | |
19 | #include <linux/ceph/auth.h> | |
20 | #include <linux/ceph/pagelist.h> | |
f24e9980 | 21 | |
c16e7869 SW |
22 | #define OSD_OP_FRONT_LEN 4096 |
23 | #define OSD_OPREPLY_FRONT_LEN 512 | |
0d59ab81 | 24 | |
5522ae0b AE |
25 | static struct kmem_cache *ceph_osd_request_cache; |
26 | ||
9e32789f | 27 | static const struct ceph_connection_operations osd_con_ops; |
f24e9980 | 28 | |
f9d25199 | 29 | static void __send_queued(struct ceph_osd_client *osdc); |
6f6c7006 | 30 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd); |
a40c4f10 YS |
31 | static void __register_request(struct ceph_osd_client *osdc, |
32 | struct ceph_osd_request *req); | |
33 | static void __unregister_linger_request(struct ceph_osd_client *osdc, | |
34 | struct ceph_osd_request *req); | |
56e925b6 SW |
35 | static void __send_request(struct ceph_osd_client *osdc, |
36 | struct ceph_osd_request *req); | |
f24e9980 SW |
37 | |
38 | /* | |
39 | * Implement client access to distributed object storage cluster. | |
40 | * | |
41 | * All data objects are stored within a cluster/cloud of OSDs, or | |
42 | * "object storage devices." (Note that Ceph OSDs have _nothing_ to | |
43 | * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply | |
44 | * remote daemons serving up and coordinating consistent and safe | |
45 | * access to storage. | |
46 | * | |
47 | * Cluster membership and the mapping of data objects onto storage devices | |
48 | * are described by the osd map. | |
49 | * | |
50 | * We keep track of pending OSD requests (read, write), resubmit | |
51 | * requests to different OSDs when the cluster topology/data layout | |
52 | * change, or retry the affected requests when the communications | |
53 | * channel with an OSD is reset. | |
54 | */ | |
55 | ||
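/*
 * Illustrative sketch (not part of the original file, not compiled): the
 * request lifecycle as seen by a caller of this client, modeled on the
 * synchronous read helpers defined further down in this file
 * (ceph_osdc_new_request(), ceph_osdc_build_request(),
 * ceph_osdc_start_request(), ceph_osdc_wait_request()).  Error paths are
 * trimmed for brevity.
 */
#if 0
static int example_sync_read(struct ceph_osd_client *osdc,
			     struct ceph_file_layout *layout,
			     struct ceph_vino vino,
			     u64 off, u64 *plen, struct page **pages)
{
	struct ceph_osd_request *req;
	int rc;

	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, 0, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* the reply data for op 0 lands in the caller's page vector */
	osd_req_op_extent_osd_data_pages(req, 0, pages, *plen, 0, false, false);

	/* finalize the request message, submit it, wait for the reply */
	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	return rc;
}
#endif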
56 | /* | |
57 | * calculate the mapping of a file extent onto an object, and fill out the | |
58 | * request accordingly. shorten extent as necessary if it crosses an | |
59 | * object boundary. | |
60 | * | |
61 | * fill osd op in request message. | |
62 | */ | |
dbe0fc41 | 63 | static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, |
a19dadfb | 64 | u64 *objnum, u64 *objoff, u64 *objlen) |
f24e9980 | 65 | { |
60e56f13 | 66 | u64 orig_len = *plen; |
d63b77f4 | 67 | int r; |
f24e9980 | 68 | |
60e56f13 | 69 | /* object extent? */ |
75d1c941 AE |
70 | r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum, |
71 | objoff, objlen); | |
d63b77f4 SW |
72 | if (r < 0) |
73 | return r; | |
75d1c941 AE |
74 | if (*objlen < orig_len) { |
75 | *plen = *objlen; | |
60e56f13 AE |
76 | dout(" skipping last %llu, final file extent %llu~%llu\n", |
77 | orig_len - *plen, off, *plen); | |
78 | } | |
79 | ||
75d1c941 | 80 | dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen); |
f24e9980 | 81 | |
3ff5f385 | 82 | return 0; |
f24e9980 SW |
83 | } |
84 | ||
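/*
 * Worked example (illustrative, assuming a simple non-striped layout with
 * 4 MB objects): a caller asks for off=3670016 (3.5 MB), *plen=1048576
 * (1 MB).  ceph_calc_file_object_mapping() returns objnum=0,
 * objoff=3670016, objlen=524288, so *plen is trimmed to 512 KB and the
 * caller must issue a follow-up request for the remaining 512 KB, which
 * maps to the start of object 1.
 */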
c54d47bf AE |
85 | static void ceph_osd_data_init(struct ceph_osd_data *osd_data) |
86 | { | |
87 | memset(osd_data, 0, sizeof (*osd_data)); | |
88 | osd_data->type = CEPH_OSD_DATA_TYPE_NONE; | |
89 | } | |
90 | ||
a4ce40a9 | 91 | static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, |
43bfe5de AE |
92 | struct page **pages, u64 length, u32 alignment, |
93 | bool pages_from_pool, bool own_pages) | |
94 | { | |
95 | osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; | |
96 | osd_data->pages = pages; | |
97 | osd_data->length = length; | |
98 | osd_data->alignment = alignment; | |
99 | osd_data->pages_from_pool = pages_from_pool; | |
100 | osd_data->own_pages = own_pages; | |
101 | } | |
43bfe5de | 102 | |
a4ce40a9 | 103 | static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, |
43bfe5de AE |
104 | struct ceph_pagelist *pagelist) |
105 | { | |
106 | osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST; | |
107 | osd_data->pagelist = pagelist; | |
108 | } | |
43bfe5de AE |
109 | |
110 | #ifdef CONFIG_BLOCK | |
a4ce40a9 | 111 | static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, |
43bfe5de AE |
112 | struct bio *bio, size_t bio_length) |
113 | { | |
114 | osd_data->type = CEPH_OSD_DATA_TYPE_BIO; | |
115 | osd_data->bio = bio; | |
116 | osd_data->bio_length = bio_length; | |
117 | } | |
43bfe5de AE |
118 | #endif /* CONFIG_BLOCK */ |
119 | ||
863c7eb5 AE |
120 | #define osd_req_op_data(oreq, whch, typ, fld) \ |
121 | ({ \ | |
122 | BUG_ON(whch >= (oreq)->r_num_ops); \ | |
123 | &(oreq)->r_ops[whch].typ.fld; \ | |
124 | }) | |
125 | ||
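/*
 * For example (illustrative), osd_req_op_data(req, 0, extent, osd_data)
 * bug-checks that op 0 exists and then evaluates to
 * &req->r_ops[0].extent.osd_data; the same macro reaches the cls fields,
 * e.g. osd_req_op_data(req, 0, cls, request_info).
 */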
49719778 AE |
126 | static struct ceph_osd_data * |
127 | osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which) | |
128 | { | |
129 | BUG_ON(which >= osd_req->r_num_ops); | |
130 | ||
131 | return &osd_req->r_ops[which].raw_data_in; | |
132 | } | |
133 | ||
a4ce40a9 AE |
134 | struct ceph_osd_data * |
135 | osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, | |
406e2c9f | 136 | unsigned int which) |
a4ce40a9 | 137 | { |
863c7eb5 | 138 | return osd_req_op_data(osd_req, which, extent, osd_data); |
a4ce40a9 AE |
139 | } |
140 | EXPORT_SYMBOL(osd_req_op_extent_osd_data); | |
141 | ||
a4ce40a9 AE |
142 | struct ceph_osd_data * |
143 | osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, | |
144 | unsigned int which) | |
145 | { | |
863c7eb5 | 146 | return osd_req_op_data(osd_req, which, cls, response_data); |
a4ce40a9 AE |
147 | } |
148 | EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? */ | |
149 | ||
49719778 AE |
150 | void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, |
151 | unsigned int which, struct page **pages, | |
152 | u64 length, u32 alignment, | |
153 | bool pages_from_pool, bool own_pages) | |
154 | { | |
155 | struct ceph_osd_data *osd_data; | |
156 | ||
157 | osd_data = osd_req_op_raw_data_in(osd_req, which); | |
158 | ceph_osd_data_pages_init(osd_data, pages, length, alignment, | |
159 | pages_from_pool, own_pages); | |
160 | } | |
161 | EXPORT_SYMBOL(osd_req_op_raw_data_in_pages); | |
162 | ||
a4ce40a9 | 163 | void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, |
406e2c9f AE |
164 | unsigned int which, struct page **pages, |
165 | u64 length, u32 alignment, | |
a4ce40a9 AE |
166 | bool pages_from_pool, bool own_pages) |
167 | { | |
168 | struct ceph_osd_data *osd_data; | |
169 | ||
863c7eb5 | 170 | osd_data = osd_req_op_data(osd_req, which, extent, osd_data); |
a4ce40a9 AE |
171 | ceph_osd_data_pages_init(osd_data, pages, length, alignment, |
172 | pages_from_pool, own_pages); | |
a4ce40a9 AE |
173 | } |
174 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); | |
175 | ||
176 | void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, | |
406e2c9f | 177 | unsigned int which, struct ceph_pagelist *pagelist) |
a4ce40a9 AE |
178 | { |
179 | struct ceph_osd_data *osd_data; | |
180 | ||
863c7eb5 | 181 | osd_data = osd_req_op_data(osd_req, which, extent, osd_data); |
a4ce40a9 | 182 | ceph_osd_data_pagelist_init(osd_data, pagelist); |
a4ce40a9 AE |
183 | } |
184 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); | |
185 | ||
186 | #ifdef CONFIG_BLOCK | |
187 | void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, | |
406e2c9f | 188 | unsigned int which, struct bio *bio, size_t bio_length) |
a4ce40a9 AE |
189 | { |
190 | struct ceph_osd_data *osd_data; | |
863c7eb5 AE |
191 | |
192 | osd_data = osd_req_op_data(osd_req, which, extent, osd_data); | |
a4ce40a9 | 193 | ceph_osd_data_bio_init(osd_data, bio, bio_length); |
a4ce40a9 AE |
194 | } |
195 | EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); | |
196 | #endif /* CONFIG_BLOCK */ | |
197 | ||
198 | static void osd_req_op_cls_request_info_pagelist( | |
199 | struct ceph_osd_request *osd_req, | |
200 | unsigned int which, struct ceph_pagelist *pagelist) | |
201 | { | |
202 | struct ceph_osd_data *osd_data; | |
203 | ||
863c7eb5 | 204 | osd_data = osd_req_op_data(osd_req, which, cls, request_info); |
a4ce40a9 | 205 | ceph_osd_data_pagelist_init(osd_data, pagelist); |
a4ce40a9 AE |
206 | } |
207 | ||
04017e29 AE |
208 | void osd_req_op_cls_request_data_pagelist( |
209 | struct ceph_osd_request *osd_req, | |
210 | unsigned int which, struct ceph_pagelist *pagelist) | |
211 | { | |
212 | struct ceph_osd_data *osd_data; | |
213 | ||
863c7eb5 | 214 | osd_data = osd_req_op_data(osd_req, which, cls, request_data); |
04017e29 AE |
215 | ceph_osd_data_pagelist_init(osd_data, pagelist); |
216 | } | |
217 | EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist); | |
218 | ||
6c57b554 AE |
219 | void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, |
220 | unsigned int which, struct page **pages, u64 length, | |
221 | u32 alignment, bool pages_from_pool, bool own_pages) | |
222 | { | |
223 | struct ceph_osd_data *osd_data; | |
224 | ||
225 | osd_data = osd_req_op_data(osd_req, which, cls, request_data); | |
226 | ceph_osd_data_pages_init(osd_data, pages, length, alignment, | |
227 | pages_from_pool, own_pages); | |
228 | } | |
229 | EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); | |
230 | ||
a4ce40a9 AE |
231 | void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, |
232 | unsigned int which, struct page **pages, u64 length, | |
233 | u32 alignment, bool pages_from_pool, bool own_pages) | |
234 | { | |
235 | struct ceph_osd_data *osd_data; | |
236 | ||
863c7eb5 | 237 | osd_data = osd_req_op_data(osd_req, which, cls, response_data); |
a4ce40a9 AE |
238 | ceph_osd_data_pages_init(osd_data, pages, length, alignment, |
239 | pages_from_pool, own_pages); | |
a4ce40a9 AE |
240 | } |
241 | EXPORT_SYMBOL(osd_req_op_cls_response_data_pages); | |
242 | ||
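/*
 * Illustrative sketch (not part of the original file, not compiled):
 * attaching buffers to ops.  Assumes the caller allocated the page
 * vectors, e.g. with ceph_alloc_page_vector(), and already initialized
 * the ops on this request.
 */
#if 0
static void example_attach_data(struct ceph_osd_request *req,
				struct page **pages, u64 len,
				struct page **reply_pages, u64 reply_len)
{
	/* op 0 is a READ/WRITE extent; its payload lives in a page vector */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);

	/* op 1 is a CEPH_OSD_OP_CALL; capture its reply into reply_pages */
	osd_req_op_cls_response_data_pages(req, 1, reply_pages, reply_len,
					   0, false, false);
}
#endif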
23c08a9c AE |
243 | static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) |
244 | { | |
245 | switch (osd_data->type) { | |
246 | case CEPH_OSD_DATA_TYPE_NONE: | |
247 | return 0; | |
248 | case CEPH_OSD_DATA_TYPE_PAGES: | |
249 | return osd_data->length; | |
250 | case CEPH_OSD_DATA_TYPE_PAGELIST: | |
251 | return (u64)osd_data->pagelist->length; | |
252 | #ifdef CONFIG_BLOCK | |
253 | case CEPH_OSD_DATA_TYPE_BIO: | |
254 | return (u64)osd_data->bio_length; | |
255 | #endif /* CONFIG_BLOCK */ | |
256 | default: | |
257 | WARN(true, "unrecognized data type %d\n", (int)osd_data->type); | |
258 | return 0; | |
259 | } | |
260 | } | |
261 | ||
c54d47bf AE |
262 | static void ceph_osd_data_release(struct ceph_osd_data *osd_data) |
263 | { | |
5476492f | 264 | if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { |
c54d47bf AE |
265 | int num_pages; |
266 | ||
267 | num_pages = calc_pages_for((u64)osd_data->alignment, | |
268 | (u64)osd_data->length); | |
269 | ceph_release_page_vector(osd_data->pages, num_pages); | |
270 | } | |
5476492f AE |
271 | ceph_osd_data_init(osd_data); |
272 | } | |
273 | ||
274 | static void osd_req_op_data_release(struct ceph_osd_request *osd_req, | |
275 | unsigned int which) | |
276 | { | |
277 | struct ceph_osd_req_op *op; | |
278 | ||
279 | BUG_ON(which >= osd_req->r_num_ops); | |
280 | op = &osd_req->r_ops[which]; | |
281 | ||
282 | switch (op->op) { | |
283 | case CEPH_OSD_OP_READ: | |
284 | case CEPH_OSD_OP_WRITE: | |
285 | ceph_osd_data_release(&op->extent.osd_data); | |
286 | break; | |
287 | case CEPH_OSD_OP_CALL: | |
288 | ceph_osd_data_release(&op->cls.request_info); | |
04017e29 | 289 | ceph_osd_data_release(&op->cls.request_data); |
5476492f AE |
290 | ceph_osd_data_release(&op->cls.response_data); |
291 | break; | |
292 | default: | |
293 | break; | |
294 | } | |
c54d47bf AE |
295 | } |
296 | ||
f24e9980 SW |
297 | /* |
298 | * requests | |
299 | */ | |
9e94af20 | 300 | static void ceph_osdc_release_request(struct kref *kref) |
f24e9980 | 301 | { |
9e94af20 ID |
302 | struct ceph_osd_request *req = container_of(kref, |
303 | struct ceph_osd_request, r_kref); | |
5476492f | 304 | unsigned int which; |
415e49a9 | 305 | |
9e94af20 ID |
306 | dout("%s %p (r_request %p r_reply %p)\n", __func__, req, |
307 | req->r_request, req->r_reply); | |
6562d661 ID |
308 | WARN_ON(!RB_EMPTY_NODE(&req->r_node)); |
309 | WARN_ON(!list_empty(&req->r_req_lru_item)); | |
310 | WARN_ON(!list_empty(&req->r_osd_item)); | |
311 | WARN_ON(!list_empty(&req->r_linger_item)); | |
312 | WARN_ON(!list_empty(&req->r_linger_osd_item)); | |
313 | WARN_ON(req->r_osd); | |
9e94af20 | 314 | |
415e49a9 SW |
315 | if (req->r_request) |
316 | ceph_msg_put(req->r_request); | |
ace6d3a9 | 317 | if (req->r_reply) { |
8921d114 | 318 | ceph_msg_revoke_incoming(req->r_reply); |
ab8cb34a | 319 | ceph_msg_put(req->r_reply); |
ace6d3a9 | 320 | } |
0fff87ec | 321 | |
5476492f AE |
322 | for (which = 0; which < req->r_num_ops; which++) |
323 | osd_req_op_data_release(req, which); | |
0fff87ec | 324 | |
415e49a9 SW |
325 | ceph_put_snap_context(req->r_snapc); |
326 | if (req->r_mempool) | |
327 | mempool_free(req, req->r_osdc->req_mempool); | |
328 | else | |
5522ae0b AE |
329 | kmem_cache_free(ceph_osd_request_cache, req); |
330 | ||
f24e9980 | 331 | } |
9e94af20 ID |
332 | |
333 | void ceph_osdc_get_request(struct ceph_osd_request *req) | |
334 | { | |
335 | dout("%s %p (was %d)\n", __func__, req, | |
336 | atomic_read(&req->r_kref.refcount)); | |
337 | kref_get(&req->r_kref); | |
338 | } | |
339 | EXPORT_SYMBOL(ceph_osdc_get_request); | |
340 | ||
341 | void ceph_osdc_put_request(struct ceph_osd_request *req) | |
342 | { | |
343 | dout("%s %p (was %d)\n", __func__, req, | |
344 | atomic_read(&req->r_kref.refcount)); | |
345 | kref_put(&req->r_kref, ceph_osdc_release_request); | |
346 | } | |
347 | EXPORT_SYMBOL(ceph_osdc_put_request); | |
68b4476b | 348 | |
3499e8a5 | 349 | struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, |
f24e9980 | 350 | struct ceph_snap_context *snapc, |
1b83bef2 | 351 | unsigned int num_ops, |
3499e8a5 | 352 | bool use_mempool, |
54a54007 | 353 | gfp_t gfp_flags) |
f24e9980 SW |
354 | { |
355 | struct ceph_osd_request *req; | |
356 | struct ceph_msg *msg; | |
1b83bef2 SW |
357 | size_t msg_size; |
358 | ||
79528734 AE |
359 | BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX); |
360 | BUG_ON(num_ops > CEPH_OSD_MAX_OP); | |
361 | ||
1b83bef2 SW |
362 | msg_size = 4 + 4 + 8 + 8 + 4 + 8; |
363 | msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */ | |
364 | msg_size += 1 + 8 + 4 + 4; /* pg_t */ | |
2d0ebc5d | 365 | msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */ |
1b83bef2 SW |
366 | msg_size += 2 + num_ops*sizeof(struct ceph_osd_op); |
367 | msg_size += 8; /* snapid */ | |
368 | msg_size += 8; /* snap_seq */ | |
369 | msg_size += 8 * (snapc ? snapc->num_snaps : 0); /* snaps */ | |
370 | msg_size += 4; | |
f24e9980 SW |
371 | |
372 | if (use_mempool) { | |
3499e8a5 | 373 | req = mempool_alloc(osdc->req_mempool, gfp_flags); |
f24e9980 SW |
374 | memset(req, 0, sizeof(*req)); |
375 | } else { | |
5522ae0b | 376 | req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags); |
f24e9980 SW |
377 | } |
378 | if (req == NULL) | |
a79832f2 | 379 | return NULL; |
f24e9980 | 380 | |
f24e9980 SW |
381 | req->r_osdc = osdc; |
382 | req->r_mempool = use_mempool; | |
79528734 | 383 | req->r_num_ops = num_ops; |
68b4476b | 384 | |
415e49a9 | 385 | kref_init(&req->r_kref); |
f24e9980 SW |
386 | init_completion(&req->r_completion); |
387 | init_completion(&req->r_safe_completion); | |
a978fa20 | 388 | RB_CLEAR_NODE(&req->r_node); |
f24e9980 | 389 | INIT_LIST_HEAD(&req->r_unsafe_item); |
a40c4f10 | 390 | INIT_LIST_HEAD(&req->r_linger_item); |
1d0326b1 | 391 | INIT_LIST_HEAD(&req->r_linger_osd_item); |
935b639a | 392 | INIT_LIST_HEAD(&req->r_req_lru_item); |
cd43045c SW |
393 | INIT_LIST_HEAD(&req->r_osd_item); |
394 | ||
3c972c95 | 395 | req->r_base_oloc.pool = -1; |
205ee118 | 396 | req->r_target_oloc.pool = -1; |
22116525 | 397 | |
c16e7869 SW |
398 | /* create reply message */ |
399 | if (use_mempool) | |
400 | msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); | |
401 | else | |
402 | msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, | |
b61c2763 | 403 | OSD_OPREPLY_FRONT_LEN, gfp_flags, true); |
a79832f2 | 404 | if (!msg) { |
c16e7869 | 405 | ceph_osdc_put_request(req); |
a79832f2 | 406 | return NULL; |
c16e7869 SW |
407 | } |
408 | req->r_reply = msg; | |
409 | ||
410 | /* create request message; allow space for oid */ | |
f24e9980 | 411 | if (use_mempool) |
8f3bc053 | 412 | msg = ceph_msgpool_get(&osdc->msgpool_op, 0); |
f24e9980 | 413 | else |
b61c2763 | 414 | msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true); |
a79832f2 | 415 | if (!msg) { |
f24e9980 | 416 | ceph_osdc_put_request(req); |
a79832f2 | 417 | return NULL; |
f24e9980 | 418 | } |
68b4476b | 419 | |
f24e9980 | 420 | memset(msg->front.iov_base, 0, msg->front.iov_len); |
3499e8a5 YS |
421 | |
422 | req->r_request = msg; | |
3499e8a5 YS |
423 | |
424 | return req; | |
425 | } | |
3d14c5d2 | 426 | EXPORT_SYMBOL(ceph_osdc_alloc_request); |
3499e8a5 | 427 | |
a8dd0a37 | 428 | static bool osd_req_opcode_valid(u16 opcode) |
68b4476b | 429 | { |
a8dd0a37 | 430 | switch (opcode) { |
68b4476b | 431 | case CEPH_OSD_OP_READ: |
a8dd0a37 | 432 | case CEPH_OSD_OP_STAT: |
4c46459c AE |
433 | case CEPH_OSD_OP_MAPEXT: |
434 | case CEPH_OSD_OP_MASKTRUNC: | |
435 | case CEPH_OSD_OP_SPARSE_READ: | |
a9f36c3e | 436 | case CEPH_OSD_OP_NOTIFY: |
a8dd0a37 | 437 | case CEPH_OSD_OP_NOTIFY_ACK: |
4c46459c | 438 | case CEPH_OSD_OP_ASSERT_VER: |
a8dd0a37 | 439 | case CEPH_OSD_OP_WRITE: |
4c46459c AE |
440 | case CEPH_OSD_OP_WRITEFULL: |
441 | case CEPH_OSD_OP_TRUNCATE: | |
442 | case CEPH_OSD_OP_ZERO: | |
443 | case CEPH_OSD_OP_DELETE: | |
444 | case CEPH_OSD_OP_APPEND: | |
a8dd0a37 | 445 | case CEPH_OSD_OP_STARTSYNC: |
4c46459c AE |
446 | case CEPH_OSD_OP_SETTRUNC: |
447 | case CEPH_OSD_OP_TRIMTRUNC: | |
448 | case CEPH_OSD_OP_TMAPUP: | |
449 | case CEPH_OSD_OP_TMAPPUT: | |
450 | case CEPH_OSD_OP_TMAPGET: | |
451 | case CEPH_OSD_OP_CREATE: | |
a9f36c3e | 452 | case CEPH_OSD_OP_ROLLBACK: |
a8dd0a37 | 453 | case CEPH_OSD_OP_WATCH: |
4c46459c AE |
454 | case CEPH_OSD_OP_OMAPGETKEYS: |
455 | case CEPH_OSD_OP_OMAPGETVALS: | |
456 | case CEPH_OSD_OP_OMAPGETHEADER: | |
457 | case CEPH_OSD_OP_OMAPGETVALSBYKEYS: | |
4c46459c AE |
458 | case CEPH_OSD_OP_OMAPSETVALS: |
459 | case CEPH_OSD_OP_OMAPSETHEADER: | |
460 | case CEPH_OSD_OP_OMAPCLEAR: | |
461 | case CEPH_OSD_OP_OMAPRMKEYS: | |
462 | case CEPH_OSD_OP_OMAP_CMP: | |
c647b8a8 | 463 | case CEPH_OSD_OP_SETALLOCHINT: |
4c46459c AE |
464 | case CEPH_OSD_OP_CLONERANGE: |
465 | case CEPH_OSD_OP_ASSERT_SRC_VERSION: | |
466 | case CEPH_OSD_OP_SRC_CMPXATTR: | |
a9f36c3e | 467 | case CEPH_OSD_OP_GETXATTR: |
4c46459c | 468 | case CEPH_OSD_OP_GETXATTRS: |
a9f36c3e AE |
469 | case CEPH_OSD_OP_CMPXATTR: |
470 | case CEPH_OSD_OP_SETXATTR: | |
4c46459c AE |
471 | case CEPH_OSD_OP_SETXATTRS: |
472 | case CEPH_OSD_OP_RESETXATTRS: | |
473 | case CEPH_OSD_OP_RMXATTR: | |
474 | case CEPH_OSD_OP_PULL: | |
475 | case CEPH_OSD_OP_PUSH: | |
476 | case CEPH_OSD_OP_BALANCEREADS: | |
477 | case CEPH_OSD_OP_UNBALANCEREADS: | |
478 | case CEPH_OSD_OP_SCRUB: | |
479 | case CEPH_OSD_OP_SCRUB_RESERVE: | |
480 | case CEPH_OSD_OP_SCRUB_UNRESERVE: | |
481 | case CEPH_OSD_OP_SCRUB_STOP: | |
482 | case CEPH_OSD_OP_SCRUB_MAP: | |
483 | case CEPH_OSD_OP_WRLOCK: | |
484 | case CEPH_OSD_OP_WRUNLOCK: | |
485 | case CEPH_OSD_OP_RDLOCK: | |
486 | case CEPH_OSD_OP_RDUNLOCK: | |
487 | case CEPH_OSD_OP_UPLOCK: | |
488 | case CEPH_OSD_OP_DNLOCK: | |
a8dd0a37 | 489 | case CEPH_OSD_OP_CALL: |
4c46459c AE |
490 | case CEPH_OSD_OP_PGLS: |
491 | case CEPH_OSD_OP_PGLS_FILTER: | |
a8dd0a37 AE |
492 | return true; |
493 | default: | |
494 | return false; | |
495 | } | |
496 | } | |
497 | ||
33803f33 AE |
498 | /* |
499 | * This is an osd op init function for opcodes that have no data or | |
500 | * other information associated with them. It also serves as a | |
501 | * common init routine for all the other init functions, below. | |
502 | */ | |
c99d2d4a | 503 | static struct ceph_osd_req_op * |
49719778 | 504 | _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, |
c99d2d4a | 505 | u16 opcode) |
33803f33 | 506 | { |
c99d2d4a AE |
507 | struct ceph_osd_req_op *op; |
508 | ||
509 | BUG_ON(which >= osd_req->r_num_ops); | |
33803f33 AE |
510 | BUG_ON(!osd_req_opcode_valid(opcode)); |
511 | ||
c99d2d4a | 512 | op = &osd_req->r_ops[which]; |
33803f33 | 513 | memset(op, 0, sizeof (*op)); |
33803f33 | 514 | op->op = opcode; |
c99d2d4a AE |
515 | |
516 | return op; | |
33803f33 AE |
517 | } |
518 | ||
49719778 AE |
519 | void osd_req_op_init(struct ceph_osd_request *osd_req, |
520 | unsigned int which, u16 opcode) | |
521 | { | |
522 | (void)_osd_req_op_init(osd_req, which, opcode); | |
523 | } | |
524 | EXPORT_SYMBOL(osd_req_op_init); | |
525 | ||
c99d2d4a AE |
526 | void osd_req_op_extent_init(struct ceph_osd_request *osd_req, |
527 | unsigned int which, u16 opcode, | |
33803f33 AE |
528 | u64 offset, u64 length, |
529 | u64 truncate_size, u32 truncate_seq) | |
530 | { | |
49719778 | 531 | struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); |
33803f33 AE |
532 | size_t payload_len = 0; |
533 | ||
ad7a60de LW |
534 | BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && |
535 | opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO && | |
536 | opcode != CEPH_OSD_OP_TRUNCATE); | |
33803f33 | 537 | |
33803f33 AE |
538 | op->extent.offset = offset; |
539 | op->extent.length = length; | |
540 | op->extent.truncate_size = truncate_size; | |
541 | op->extent.truncate_seq = truncate_seq; | |
542 | if (opcode == CEPH_OSD_OP_WRITE) | |
543 | payload_len += length; | |
544 | ||
545 | op->payload_len = payload_len; | |
546 | } | |
547 | EXPORT_SYMBOL(osd_req_op_extent_init); | |
548 | ||
c99d2d4a AE |
549 | void osd_req_op_extent_update(struct ceph_osd_request *osd_req, |
550 | unsigned int which, u64 length) | |
e5975c7c | 551 | { |
c99d2d4a AE |
552 | struct ceph_osd_req_op *op; |
553 | u64 previous; | |
554 | ||
555 | BUG_ON(which >= osd_req->r_num_ops); | |
556 | op = &osd_req->r_ops[which]; | |
557 | previous = op->extent.length; | |
e5975c7c AE |
558 | |
559 | if (length == previous) | |
560 | return; /* Nothing to do */ | |
561 | BUG_ON(length > previous); | |
562 | ||
563 | op->extent.length = length; | |
564 | op->payload_len -= previous - length; | |
565 | } | |
566 | EXPORT_SYMBOL(osd_req_op_extent_update); | |
567 | ||
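/*
 * Illustrative sketch (not part of the original file, not compiled):
 * initialize a write extent op and later shrink it, e.g. after learning
 * that less data will actually be written.
 */
#if 0
static void example_extent_op(struct ceph_osd_request *req)
{
	/* write 4096 bytes at offset 0, no truncation interval */
	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_WRITE, 0, 4096, 0, 0);

	/* trim the same op to 2048 bytes; payload_len shrinks accordingly */
	osd_req_op_extent_update(req, 0, 2048);
}
#endif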
c99d2d4a | 568 | void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, |
04017e29 | 569 | u16 opcode, const char *class, const char *method) |
33803f33 | 570 | { |
49719778 | 571 | struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); |
5f562df5 | 572 | struct ceph_pagelist *pagelist; |
33803f33 AE |
573 | size_t payload_len = 0; |
574 | size_t size; | |
575 | ||
576 | BUG_ON(opcode != CEPH_OSD_OP_CALL); | |
577 | ||
5f562df5 AE |
578 | pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); |
579 | BUG_ON(!pagelist); | |
580 | ceph_pagelist_init(pagelist); | |
581 | ||
33803f33 AE |
582 | op->cls.class_name = class; |
583 | size = strlen(class); | |
584 | BUG_ON(size > (size_t) U8_MAX); | |
585 | op->cls.class_len = size; | |
5f562df5 | 586 | ceph_pagelist_append(pagelist, class, size); |
33803f33 AE |
587 | payload_len += size; |
588 | ||
589 | op->cls.method_name = method; | |
590 | size = strlen(method); | |
591 | BUG_ON(size > (size_t) U8_MAX); | |
592 | op->cls.method_len = size; | |
5f562df5 | 593 | ceph_pagelist_append(pagelist, method, size); |
33803f33 AE |
594 | payload_len += size; |
595 | ||
a4ce40a9 | 596 | osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); |
5f562df5 | 597 | |
33803f33 AE |
598 | op->cls.argc = 0; /* currently unused */ |
599 | ||
600 | op->payload_len = payload_len; | |
601 | } | |
602 | EXPORT_SYMBOL(osd_req_op_cls_init); | |
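/*
 * Illustrative sketch (not part of the original file, not compiled):
 * set up a CEPH_OSD_OP_CALL into an object class.  The class and method
 * names shown ("lock"/"get_info") are only examples; input for the call
 * would be attached with osd_req_op_cls_request_data_pagelist() or
 * osd_req_op_cls_request_data_pages(), and the reply captured with
 * osd_req_op_cls_response_data_pages().
 */
#if 0
static void example_cls_op(struct ceph_osd_request *req)
{
	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "get_info");
}
#endif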
8c042b0d | 603 | |
c99d2d4a AE |
604 | void osd_req_op_watch_init(struct ceph_osd_request *osd_req, |
605 | unsigned int which, u16 opcode, | |
33803f33 AE |
606 | u64 cookie, u64 version, int flag) |
607 | { | |
49719778 | 608 | struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); |
33803f33 | 609 | |
c99d2d4a | 610 | BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); |
33803f33 AE |
611 | |
612 | op->watch.cookie = cookie; | |
9ef1ee5a | 613 | op->watch.ver = version; |
33803f33 | 614 | if (opcode == CEPH_OSD_OP_WATCH && flag) |
c99d2d4a | 615 | op->watch.flag = (u8)1; |
33803f33 AE |
616 | } |
617 | EXPORT_SYMBOL(osd_req_op_watch_init); | |
618 | ||
c647b8a8 ID |
619 | void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, |
620 | unsigned int which, | |
621 | u64 expected_object_size, | |
622 | u64 expected_write_size) | |
623 | { | |
624 | struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, | |
625 | CEPH_OSD_OP_SETALLOCHINT); | |
626 | ||
627 | op->alloc_hint.expected_object_size = expected_object_size; | |
628 | op->alloc_hint.expected_write_size = expected_write_size; | |
629 | ||
630 | /* | |
631 | * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed | |
632 | * not worth a feature bit. Set FAILOK per-op flag to make | |
633 | * sure older osds don't trip over an unsupported opcode. | |
634 | */ | |
635 | op->flags |= CEPH_OSD_OP_FLAG_FAILOK; | |
636 | } | |
637 | EXPORT_SYMBOL(osd_req_op_alloc_hint_init); | |
638 | ||
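/*
 * Illustrative sketch (not part of the original file, not compiled):
 * hint that the object will be written in whole 4 MB units (the rbd use
 * case).  Because of CEPH_OSD_OP_FLAG_FAILOK, an older OSD that does not
 * know SETALLOCHINT can fail just this op without failing the whole
 * request.
 */
#if 0
static void example_alloc_hint(struct ceph_osd_request *req)
{
	osd_req_op_alloc_hint_init(req, 0, 4 * 1024 * 1024, 4 * 1024 * 1024);
}
#endif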
90af3602 | 639 | static void ceph_osdc_msg_data_add(struct ceph_msg *msg, |
ec9123c5 AE |
640 | struct ceph_osd_data *osd_data) |
641 | { | |
642 | u64 length = ceph_osd_data_length(osd_data); | |
643 | ||
644 | if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { | |
645 | BUG_ON(length > (u64) SIZE_MAX); | |
646 | if (length) | |
90af3602 | 647 | ceph_msg_data_add_pages(msg, osd_data->pages, |
ec9123c5 AE |
648 | length, osd_data->alignment); |
649 | } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { | |
650 | BUG_ON(!length); | |
90af3602 | 651 | ceph_msg_data_add_pagelist(msg, osd_data->pagelist); |
ec9123c5 AE |
652 | #ifdef CONFIG_BLOCK |
653 | } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { | |
90af3602 | 654 | ceph_msg_data_add_bio(msg, osd_data->bio, length); |
ec9123c5 AE |
655 | #endif |
656 | } else { | |
657 | BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); | |
658 | } | |
659 | } | |
660 | ||
a8dd0a37 | 661 | static u64 osd_req_encode_op(struct ceph_osd_request *req, |
79528734 | 662 | struct ceph_osd_op *dst, unsigned int which) |
a8dd0a37 | 663 | { |
79528734 | 664 | struct ceph_osd_req_op *src; |
04017e29 | 665 | struct ceph_osd_data *osd_data; |
54d50649 | 666 | u64 request_data_len = 0; |
04017e29 | 667 | u64 data_length; |
a8dd0a37 | 668 | |
79528734 AE |
669 | BUG_ON(which >= req->r_num_ops); |
670 | src = &req->r_ops[which]; | |
a8dd0a37 AE |
671 | if (WARN_ON(!osd_req_opcode_valid(src->op))) { |
672 | pr_err("unrecognized osd opcode %d\n", src->op); | |
673 | ||
674 | return 0; | |
675 | } | |
676 | ||
677 | switch (src->op) { | |
678 | case CEPH_OSD_OP_STAT: | |
49719778 AE |
679 | osd_data = &src->raw_data_in; |
680 | ceph_osdc_msg_data_add(req->r_reply, osd_data); | |
a8dd0a37 AE |
681 | break; |
682 | case CEPH_OSD_OP_READ: | |
683 | case CEPH_OSD_OP_WRITE: | |
ad7a60de LW |
684 | case CEPH_OSD_OP_ZERO: |
685 | case CEPH_OSD_OP_DELETE: | |
686 | case CEPH_OSD_OP_TRUNCATE: | |
a8dd0a37 | 687 | if (src->op == CEPH_OSD_OP_WRITE) |
54d50649 | 688 | request_data_len = src->extent.length; |
a8dd0a37 AE |
689 | dst->extent.offset = cpu_to_le64(src->extent.offset); |
690 | dst->extent.length = cpu_to_le64(src->extent.length); | |
691 | dst->extent.truncate_size = | |
692 | cpu_to_le64(src->extent.truncate_size); | |
693 | dst->extent.truncate_seq = | |
694 | cpu_to_le32(src->extent.truncate_seq); | |
04017e29 | 695 | osd_data = &src->extent.osd_data; |
5476492f | 696 | if (src->op == CEPH_OSD_OP_WRITE) |
04017e29 | 697 | ceph_osdc_msg_data_add(req->r_request, osd_data); |
5476492f | 698 | else |
04017e29 | 699 | ceph_osdc_msg_data_add(req->r_reply, osd_data); |
a8dd0a37 AE |
700 | break; |
701 | case CEPH_OSD_OP_CALL: | |
a8dd0a37 AE |
702 | dst->cls.class_len = src->cls.class_len; |
703 | dst->cls.method_len = src->cls.method_len; | |
04017e29 AE |
704 | osd_data = &src->cls.request_info; |
705 | ceph_osdc_msg_data_add(req->r_request, osd_data); | |
706 | BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST); | |
707 | request_data_len = osd_data->pagelist->length; | |
708 | ||
709 | osd_data = &src->cls.request_data; | |
710 | data_length = ceph_osd_data_length(osd_data); | |
711 | if (data_length) { | |
712 | BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE); | |
713 | dst->cls.indata_len = cpu_to_le32(data_length); | |
714 | ceph_osdc_msg_data_add(req->r_request, osd_data); | |
715 | src->payload_len += data_length; | |
716 | request_data_len += data_length; | |
717 | } | |
718 | osd_data = &src->cls.response_data; | |
719 | ceph_osdc_msg_data_add(req->r_reply, osd_data); | |
a8dd0a37 AE |
720 | break; |
721 | case CEPH_OSD_OP_STARTSYNC: | |
722 | break; | |
723 | case CEPH_OSD_OP_NOTIFY_ACK: | |
724 | case CEPH_OSD_OP_WATCH: | |
725 | dst->watch.cookie = cpu_to_le64(src->watch.cookie); | |
726 | dst->watch.ver = cpu_to_le64(src->watch.ver); | |
727 | dst->watch.flag = src->watch.flag; | |
728 | break; | |
c647b8a8 ID |
729 | case CEPH_OSD_OP_SETALLOCHINT: |
730 | dst->alloc_hint.expected_object_size = | |
731 | cpu_to_le64(src->alloc_hint.expected_object_size); | |
732 | dst->alloc_hint.expected_write_size = | |
733 | cpu_to_le64(src->alloc_hint.expected_write_size); | |
734 | break; | |
a8dd0a37 | 735 | default: |
4c46459c | 736 | pr_err("unsupported osd opcode %s\n", |
8f63ca2d | 737 | ceph_osd_op_name(src->op)); |
4c46459c | 738 | WARN_ON(1); |
a8dd0a37 AE |
739 | |
740 | return 0; | |
68b4476b | 741 | } |
7b25bf5f | 742 | |
a8dd0a37 | 743 | dst->op = cpu_to_le16(src->op); |
7b25bf5f | 744 | dst->flags = cpu_to_le32(src->flags); |
68b4476b | 745 | dst->payload_len = cpu_to_le32(src->payload_len); |
175face2 | 746 | |
54d50649 | 747 | return request_data_len; |
68b4476b YS |
748 | } |
749 | ||
3499e8a5 YS |
750 | /* |
751 | * build new request AND message, calculate layout, and adjust file | |
752 | * extent as needed. | |
753 | * | |
754 | * if the file was recently truncated, we include information about its | |
755 | * old and new size so that the object can be updated appropriately. (we | |
756 | * avoid synchronously deleting truncated objects because it's slow.) | |
757 | * | |
758 | * if @num_ops is greater than one, a second 'startsync' op is included so |
759 | * that the osd will flush data quickly. |
760 | */ | |
761 | struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, | |
762 | struct ceph_file_layout *layout, | |
763 | struct ceph_vino vino, | |
acead002 | 764 | u64 off, u64 *plen, int num_ops, |
3499e8a5 YS |
765 | int opcode, int flags, |
766 | struct ceph_snap_context *snapc, | |
3499e8a5 YS |
767 | u32 truncate_seq, |
768 | u64 truncate_size, | |
153e5167 | 769 | bool use_mempool) |
3499e8a5 | 770 | { |
68b4476b | 771 | struct ceph_osd_request *req; |
75d1c941 AE |
772 | u64 objnum = 0; |
773 | u64 objoff = 0; | |
774 | u64 objlen = 0; | |
d18d1e28 AE |
775 | u32 object_size; |
776 | u64 object_base; | |
6816282d | 777 | int r; |
68b4476b | 778 | |
ad7a60de LW |
779 | BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && |
780 | opcode != CEPH_OSD_OP_DELETE && opcode != CEPH_OSD_OP_ZERO && | |
781 | opcode != CEPH_OSD_OP_TRUNCATE); | |
68b4476b | 782 | |
acead002 | 783 | req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, |
ae7ca4a3 | 784 | GFP_NOFS); |
4ad12621 | 785 | if (!req) |
6816282d | 786 | return ERR_PTR(-ENOMEM); |
79528734 | 787 | |
d178a9e7 | 788 | req->r_flags = flags; |
3499e8a5 YS |
789 | |
790 | /* calculate max write size */ | |
a19dadfb | 791 | r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen); |
3ff5f385 AE |
792 | if (r < 0) { |
793 | ceph_osdc_put_request(req); | |
6816282d | 794 | return ERR_PTR(r); |
3ff5f385 | 795 | } |
a19dadfb | 796 | |
d18d1e28 AE |
797 | object_size = le32_to_cpu(layout->fl_object_size); |
798 | object_base = off - objoff; | |
ccca4e37 YZ |
799 | if (!(truncate_seq == 1 && truncate_size == -1ULL)) { |
800 | if (truncate_size <= object_base) { | |
801 | truncate_size = 0; | |
802 | } else { | |
803 | truncate_size -= object_base; | |
804 | if (truncate_size > object_size) | |
805 | truncate_size = object_size; | |
806 | } | |
a19dadfb | 807 | } |
d18d1e28 | 808 | |
c99d2d4a | 809 | osd_req_op_extent_init(req, 0, opcode, objoff, objlen, |
b0270324 | 810 | truncate_size, truncate_seq); |
8c042b0d | 811 | |
acead002 AE |
812 | /* |
813 | * A second op in the ops array means the caller wants to | |
814 | * also issue a 'startsync' command so that the |
815 | * osd will flush data quickly. | |
816 | */ | |
817 | if (num_ops > 1) | |
c99d2d4a | 818 | osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC); |
d18d1e28 | 819 | |
3c972c95 | 820 | req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout); |
3499e8a5 | 821 | |
3c972c95 | 822 | snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name), |
4295f221 | 823 | "%llx.%08llx", vino.ino, objnum); |
3c972c95 | 824 | req->r_base_oid.name_len = strlen(req->r_base_oid.name); |
dbe0fc41 | 825 | |
f24e9980 SW |
826 | return req; |
827 | } | |
3d14c5d2 | 828 | EXPORT_SYMBOL(ceph_osdc_new_request); |
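/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * write request that also asks for the trailing STARTSYNC op by passing
 * num_ops == 2, as described in the function above.  "snapc" is the
 * caller's snap context; truncate_seq/truncate_size are zero here.
 */
#if 0
static struct ceph_osd_request *
example_new_write(struct ceph_osd_client *osdc,
		  struct ceph_file_layout *layout, struct ceph_vino vino,
		  struct ceph_snap_context *snapc, u64 off, u64 *plen)
{
	return ceph_osdc_new_request(osdc, layout, vino, off, plen, 2,
				     CEPH_OSD_OP_WRITE,
				     CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
				     snapc, 0, 0, false);
}
#endif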
f24e9980 SW |
829 | |
830 | /* | |
831 | * We keep osd requests in an rbtree, sorted by ->r_tid. | |
832 | */ | |
833 | static void __insert_request(struct ceph_osd_client *osdc, | |
834 | struct ceph_osd_request *new) | |
835 | { | |
836 | struct rb_node **p = &osdc->requests.rb_node; | |
837 | struct rb_node *parent = NULL; | |
838 | struct ceph_osd_request *req = NULL; | |
839 | ||
840 | while (*p) { | |
841 | parent = *p; | |
842 | req = rb_entry(parent, struct ceph_osd_request, r_node); | |
843 | if (new->r_tid < req->r_tid) | |
844 | p = &(*p)->rb_left; | |
845 | else if (new->r_tid > req->r_tid) | |
846 | p = &(*p)->rb_right; | |
847 | else | |
848 | BUG(); | |
849 | } | |
850 | ||
851 | rb_link_node(&new->r_node, parent, p); | |
852 | rb_insert_color(&new->r_node, &osdc->requests); | |
853 | } | |
854 | ||
855 | static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc, | |
856 | u64 tid) | |
857 | { | |
858 | struct ceph_osd_request *req; | |
859 | struct rb_node *n = osdc->requests.rb_node; | |
860 | ||
861 | while (n) { | |
862 | req = rb_entry(n, struct ceph_osd_request, r_node); | |
863 | if (tid < req->r_tid) | |
864 | n = n->rb_left; | |
865 | else if (tid > req->r_tid) | |
866 | n = n->rb_right; | |
867 | else | |
868 | return req; | |
869 | } | |
870 | return NULL; | |
871 | } | |
872 | ||
873 | static struct ceph_osd_request * | |
874 | __lookup_request_ge(struct ceph_osd_client *osdc, | |
875 | u64 tid) | |
876 | { | |
877 | struct ceph_osd_request *req; | |
878 | struct rb_node *n = osdc->requests.rb_node; | |
879 | ||
880 | while (n) { | |
881 | req = rb_entry(n, struct ceph_osd_request, r_node); | |
882 | if (tid < req->r_tid) { | |
883 | if (!n->rb_left) | |
884 | return req; | |
885 | n = n->rb_left; | |
886 | } else if (tid > req->r_tid) { | |
887 | n = n->rb_right; | |
888 | } else { | |
889 | return req; | |
890 | } | |
891 | } | |
892 | return NULL; | |
893 | } | |
894 | ||
6f6c7006 SW |
895 | /* |
896 | * Resubmit requests pending on the given osd. | |
897 | */ | |
898 | static void __kick_osd_requests(struct ceph_osd_client *osdc, | |
899 | struct ceph_osd *osd) | |
900 | { | |
a40c4f10 | 901 | struct ceph_osd_request *req, *nreq; |
e02493c0 | 902 | LIST_HEAD(resend); |
6f6c7006 SW |
903 | int err; |
904 | ||
905 | dout("__kick_osd_requests osd%d\n", osd->o_osd); | |
906 | err = __reset_osd(osdc, osd); | |
685a7555 | 907 | if (err) |
6f6c7006 | 908 | return; |
e02493c0 AE |
909 | /* |
910 | * Build up a list of requests to resend by traversing the | |
911 | * osd's list of requests. Requests for a given object are | |
912 | * sent in tid order, and that is also the order they're | |
913 | * kept on this list. Therefore all requests that are in | |
914 | * flight will be found first, followed by all requests that | |
915 | * have not yet been sent. And to resend requests while | |
916 | * preserving this order we will want to put any sent | |
917 | * requests back on the front of the osd client's unsent | |
918 | * list. | |
919 | * | |
920 | * So we build a separate ordered list of already-sent | |
921 | * requests for the affected osd and splice it onto the | |
922 | * front of the osd client's unsent list. Once we've seen a | |
923 | * request that has not yet been sent we're done. Those | |
924 | * requests are already sitting right where they belong. | |
925 | */ | |
6f6c7006 | 926 | list_for_each_entry(req, &osd->o_requests, r_osd_item) { |
e02493c0 AE |
927 | if (!req->r_sent) |
928 | break; | |
929 | list_move_tail(&req->r_req_lru_item, &resend); | |
930 | dout("requeueing %p tid %llu osd%d\n", req, req->r_tid, | |
6f6c7006 | 931 | osd->o_osd); |
a40c4f10 YS |
932 | if (!req->r_linger) |
933 | req->r_flags |= CEPH_OSD_FLAG_RETRY; | |
934 | } | |
e02493c0 | 935 | list_splice(&resend, &osdc->req_unsent); |
a40c4f10 | 936 | |
e02493c0 AE |
937 | /* |
938 | * Linger requests are re-registered before sending, which | |
939 | * sets up a new tid for each. We add them to the unsent | |
940 | * list at the end to keep things in tid order. | |
941 | */ | |
a40c4f10 | 942 | list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, |
1d0326b1 | 943 | r_linger_osd_item) { |
77f38e0e SW |
944 | /* |
945 | * reregister request prior to unregistering linger so | |
946 | * that r_osd is preserved. | |
947 | */ | |
948 | BUG_ON(!list_empty(&req->r_req_lru_item)); | |
a40c4f10 | 949 | __register_request(osdc, req); |
e02493c0 | 950 | list_add_tail(&req->r_req_lru_item, &osdc->req_unsent); |
ad885927 | 951 | list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); |
77f38e0e | 952 | __unregister_linger_request(osdc, req); |
a40c4f10 YS |
953 | dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, |
954 | osd->o_osd); | |
6f6c7006 SW |
955 | } |
956 | } | |
957 | ||
f24e9980 | 958 | /* |
81b024e7 | 959 | * If the osd connection drops, we need to resubmit all requests. |
f24e9980 SW |
960 | */ |
961 | static void osd_reset(struct ceph_connection *con) | |
962 | { | |
963 | struct ceph_osd *osd = con->private; | |
964 | struct ceph_osd_client *osdc; | |
965 | ||
966 | if (!osd) | |
967 | return; | |
968 | dout("osd_reset osd%d\n", osd->o_osd); | |
969 | osdc = osd->o_osdc; | |
f24e9980 | 970 | down_read(&osdc->map_sem); |
83aff95e SW |
971 | mutex_lock(&osdc->request_mutex); |
972 | __kick_osd_requests(osdc, osd); | |
f9d25199 | 973 | __send_queued(osdc); |
83aff95e | 974 | mutex_unlock(&osdc->request_mutex); |
f24e9980 SW |
975 | up_read(&osdc->map_sem); |
976 | } | |
977 | ||
978 | /* | |
979 | * Track open sessions with osds. | |
980 | */ | |
e10006f8 | 981 | static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) |
f24e9980 SW |
982 | { |
983 | struct ceph_osd *osd; | |
984 | ||
985 | osd = kzalloc(sizeof(*osd), GFP_NOFS); | |
986 | if (!osd) | |
987 | return NULL; | |
988 | ||
989 | atomic_set(&osd->o_ref, 1); | |
990 | osd->o_osdc = osdc; | |
e10006f8 | 991 | osd->o_osd = onum; |
f407731d | 992 | RB_CLEAR_NODE(&osd->o_node); |
f24e9980 | 993 | INIT_LIST_HEAD(&osd->o_requests); |
a40c4f10 | 994 | INIT_LIST_HEAD(&osd->o_linger_requests); |
f5a2041b | 995 | INIT_LIST_HEAD(&osd->o_osd_lru); |
f24e9980 SW |
996 | osd->o_incarnation = 1; |
997 | ||
b7a9e5dd | 998 | ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); |
4e7a5dcd | 999 | |
422d2cb8 | 1000 | INIT_LIST_HEAD(&osd->o_keepalive_item); |
f24e9980 SW |
1001 | return osd; |
1002 | } | |
1003 | ||
1004 | static struct ceph_osd *get_osd(struct ceph_osd *osd) | |
1005 | { | |
1006 | if (atomic_inc_not_zero(&osd->o_ref)) { | |
1007 | dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1, | |
1008 | atomic_read(&osd->o_ref)); | |
1009 | return osd; | |
1010 | } else { | |
1011 | dout("get_osd %p FAIL\n", osd); | |
1012 | return NULL; | |
1013 | } | |
1014 | } | |
1015 | ||
1016 | static void put_osd(struct ceph_osd *osd) | |
1017 | { | |
1018 | dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), | |
1019 | atomic_read(&osd->o_ref) - 1); | |
a255651d | 1020 | if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { |
79494d1b SW |
1021 | struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; |
1022 | ||
27859f97 | 1023 | ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); |
f24e9980 | 1024 | kfree(osd); |
79494d1b | 1025 | } |
f24e9980 SW |
1026 | } |
1027 | ||
1028 | /* | |
1029 | * remove an osd from our map | |
1030 | */ | |
f5a2041b | 1031 | static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) |
f24e9980 | 1032 | { |
f5a2041b | 1033 | dout("__remove_osd %p\n", osd); |
f24e9980 | 1034 | BUG_ON(!list_empty(&osd->o_requests)); |
7c6e6fc5 ID |
1035 | BUG_ON(!list_empty(&osd->o_linger_requests)); |
1036 | ||
f24e9980 | 1037 | rb_erase(&osd->o_node, &osdc->osds); |
f5a2041b | 1038 | list_del_init(&osd->o_osd_lru); |
f24e9980 SW |
1039 | ceph_con_close(&osd->o_con); |
1040 | put_osd(osd); | |
1041 | } | |
1042 | ||
aca420bc SW |
1043 | static void remove_all_osds(struct ceph_osd_client *osdc) |
1044 | { | |
048a9d2d | 1045 | dout("%s %p\n", __func__, osdc); |
aca420bc SW |
1046 | mutex_lock(&osdc->request_mutex); |
1047 | while (!RB_EMPTY_ROOT(&osdc->osds)) { | |
1048 | struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), | |
1049 | struct ceph_osd, o_node); | |
1050 | __remove_osd(osdc, osd); | |
1051 | } | |
1052 | mutex_unlock(&osdc->request_mutex); | |
1053 | } | |
1054 | ||
f5a2041b YS |
1055 | static void __move_osd_to_lru(struct ceph_osd_client *osdc, |
1056 | struct ceph_osd *osd) | |
1057 | { | |
bbf37ec3 | 1058 | dout("%s %p\n", __func__, osd); |
f5a2041b | 1059 | BUG_ON(!list_empty(&osd->o_osd_lru)); |
bbf37ec3 | 1060 | |
f5a2041b | 1061 | list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); |
3d14c5d2 | 1062 | osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ; |
f5a2041b YS |
1063 | } |
1064 | ||
bbf37ec3 ID |
1065 | static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc, |
1066 | struct ceph_osd *osd) | |
1067 | { | |
1068 | dout("%s %p\n", __func__, osd); | |
1069 | ||
1070 | if (list_empty(&osd->o_requests) && | |
1071 | list_empty(&osd->o_linger_requests)) | |
1072 | __move_osd_to_lru(osdc, osd); | |
1073 | } | |
1074 | ||
f5a2041b YS |
1075 | static void __remove_osd_from_lru(struct ceph_osd *osd) |
1076 | { | |
1077 | dout("__remove_osd_from_lru %p\n", osd); | |
1078 | if (!list_empty(&osd->o_osd_lru)) | |
1079 | list_del_init(&osd->o_osd_lru); | |
1080 | } | |
1081 | ||
aca420bc | 1082 | static void remove_old_osds(struct ceph_osd_client *osdc) |
f5a2041b YS |
1083 | { |
1084 | struct ceph_osd *osd, *nosd; | |
1085 | ||
1086 | dout("__remove_old_osds %p\n", osdc); | |
1087 | mutex_lock(&osdc->request_mutex); | |
1088 | list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { | |
aca420bc | 1089 | if (time_before(jiffies, osd->lru_ttl)) |
f5a2041b YS |
1090 | break; |
1091 | __remove_osd(osdc, osd); | |
1092 | } | |
1093 | mutex_unlock(&osdc->request_mutex); | |
1094 | } | |
1095 | ||
f24e9980 SW |
1096 | /* |
1097 | * reset osd connect | |
1098 | */ | |
f5a2041b | 1099 | static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) |
f24e9980 | 1100 | { |
c3acb181 | 1101 | struct ceph_entity_addr *peer_addr; |
f24e9980 | 1102 | |
f5a2041b | 1103 | dout("__reset_osd %p osd%d\n", osd, osd->o_osd); |
a40c4f10 YS |
1104 | if (list_empty(&osd->o_requests) && |
1105 | list_empty(&osd->o_linger_requests)) { | |
f5a2041b | 1106 | __remove_osd(osdc, osd); |
c3acb181 AE |
1107 | |
1108 | return -ENODEV; | |
1109 | } | |
1110 | ||
1111 | peer_addr = &osdc->osdmap->osd_addr[osd->o_osd]; | |
1112 | if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && | |
1113 | !ceph_con_opened(&osd->o_con)) { | |
1114 | struct ceph_osd_request *req; | |
1115 | ||
0b4af2e8 ID |
1116 | dout("osd addr hasn't changed and connection never opened, " |
1117 | "letting msgr retry\n"); | |
87b315a5 SW |
1118 | /* touch each r_stamp for handle_timeout()'s benefit */ |
1119 | list_for_each_entry(req, &osd->o_requests, r_osd_item) | |
1120 | req->r_stamp = jiffies; | |
c3acb181 AE |
1121 | |
1122 | return -EAGAIN; | |
f24e9980 | 1123 | } |
c3acb181 AE |
1124 | |
1125 | ceph_con_close(&osd->o_con); | |
1126 | ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); | |
1127 | osd->o_incarnation++; | |
1128 | ||
1129 | return 0; | |
f24e9980 SW |
1130 | } |
1131 | ||
1132 | static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) | |
1133 | { | |
1134 | struct rb_node **p = &osdc->osds.rb_node; | |
1135 | struct rb_node *parent = NULL; | |
1136 | struct ceph_osd *osd = NULL; | |
1137 | ||
aca420bc | 1138 | dout("__insert_osd %p osd%d\n", new, new->o_osd); |
f24e9980 SW |
1139 | while (*p) { |
1140 | parent = *p; | |
1141 | osd = rb_entry(parent, struct ceph_osd, o_node); | |
1142 | if (new->o_osd < osd->o_osd) | |
1143 | p = &(*p)->rb_left; | |
1144 | else if (new->o_osd > osd->o_osd) | |
1145 | p = &(*p)->rb_right; | |
1146 | else | |
1147 | BUG(); | |
1148 | } | |
1149 | ||
1150 | rb_link_node(&new->o_node, parent, p); | |
1151 | rb_insert_color(&new->o_node, &osdc->osds); | |
1152 | } | |
1153 | ||
1154 | static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) | |
1155 | { | |
1156 | struct ceph_osd *osd; | |
1157 | struct rb_node *n = osdc->osds.rb_node; | |
1158 | ||
1159 | while (n) { | |
1160 | osd = rb_entry(n, struct ceph_osd, o_node); | |
1161 | if (o < osd->o_osd) | |
1162 | n = n->rb_left; | |
1163 | else if (o > osd->o_osd) | |
1164 | n = n->rb_right; | |
1165 | else | |
1166 | return osd; | |
1167 | } | |
1168 | return NULL; | |
1169 | } | |
1170 | ||
422d2cb8 YS |
1171 | static void __schedule_osd_timeout(struct ceph_osd_client *osdc) |
1172 | { | |
1173 | schedule_delayed_work(&osdc->timeout_work, | |
3d14c5d2 | 1174 | osdc->client->options->osd_keepalive_timeout * HZ); |
422d2cb8 YS |
1175 | } |
1176 | ||
1177 | static void __cancel_osd_timeout(struct ceph_osd_client *osdc) | |
1178 | { | |
1179 | cancel_delayed_work(&osdc->timeout_work); | |
1180 | } | |
f24e9980 SW |
1181 | |
1182 | /* | |
1183 | * Register request, assign tid. If this is the first request, set up | |
1184 | * the timeout event. | |
1185 | */ | |
a40c4f10 YS |
1186 | static void __register_request(struct ceph_osd_client *osdc, |
1187 | struct ceph_osd_request *req) | |
f24e9980 | 1188 | { |
f24e9980 | 1189 | req->r_tid = ++osdc->last_tid; |
6df058c0 | 1190 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); |
77f38e0e | 1191 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
f24e9980 SW |
1192 | __insert_request(osdc, req); |
1193 | ceph_osdc_get_request(req); | |
1194 | osdc->num_requests++; | |
f24e9980 | 1195 | if (osdc->num_requests == 1) { |
422d2cb8 YS |
1196 | dout(" first request, scheduling timeout\n"); |
1197 | __schedule_osd_timeout(osdc); | |
f24e9980 | 1198 | } |
a40c4f10 YS |
1199 | } |
1200 | ||
f24e9980 SW |
1201 | /* |
1202 | * called under osdc->request_mutex | |
1203 | */ | |
1204 | static void __unregister_request(struct ceph_osd_client *osdc, | |
1205 | struct ceph_osd_request *req) | |
1206 | { | |
35f9f8a0 SW |
1207 | if (RB_EMPTY_NODE(&req->r_node)) { |
1208 | dout("__unregister_request %p tid %lld not registered\n", | |
1209 | req, req->r_tid); | |
1210 | return; | |
1211 | } | |
1212 | ||
f24e9980 SW |
1213 | dout("__unregister_request %p tid %lld\n", req, req->r_tid); |
1214 | rb_erase(&req->r_node, &osdc->requests); | |
6562d661 | 1215 | RB_CLEAR_NODE(&req->r_node); |
f24e9980 SW |
1216 | osdc->num_requests--; |
1217 | ||
0ba6478d SW |
1218 | if (req->r_osd) { |
1219 | /* make sure the original request isn't in flight. */ | |
6740a845 | 1220 | ceph_msg_revoke(req->r_request); |
0ba6478d SW |
1221 | |
1222 | list_del_init(&req->r_osd_item); | |
bbf37ec3 | 1223 | maybe_move_osd_to_lru(osdc, req->r_osd); |
4f23409e | 1224 | if (list_empty(&req->r_linger_osd_item)) |
a40c4f10 | 1225 | req->r_osd = NULL; |
0ba6478d | 1226 | } |
f24e9980 | 1227 | |
7d5f2481 | 1228 | list_del_init(&req->r_req_lru_item); |
f24e9980 SW |
1229 | ceph_osdc_put_request(req); |
1230 | ||
422d2cb8 YS |
1231 | if (osdc->num_requests == 0) { |
1232 | dout(" no requests, canceling timeout\n"); | |
1233 | __cancel_osd_timeout(osdc); | |
f24e9980 SW |
1234 | } |
1235 | } | |
1236 | ||
1237 | /* | |
1238 | * Cancel a previously queued request message | |
1239 | */ | |
1240 | static void __cancel_request(struct ceph_osd_request *req) | |
1241 | { | |
6bc18876 | 1242 | if (req->r_sent && req->r_osd) { |
6740a845 | 1243 | ceph_msg_revoke(req->r_request); |
f24e9980 SW |
1244 | req->r_sent = 0; |
1245 | } | |
1246 | } | |
1247 | ||
a40c4f10 YS |
1248 | static void __register_linger_request(struct ceph_osd_client *osdc, |
1249 | struct ceph_osd_request *req) | |
1250 | { | |
af593064 ID |
1251 | dout("%s %p tid %llu\n", __func__, req, req->r_tid); |
1252 | WARN_ON(!req->r_linger); | |
1253 | ||
96e4dac6 | 1254 | ceph_osdc_get_request(req); |
a40c4f10 | 1255 | list_add_tail(&req->r_linger_item, &osdc->req_linger); |
6194ea89 | 1256 | if (req->r_osd) |
1d0326b1 | 1257 | list_add_tail(&req->r_linger_osd_item, |
6194ea89 | 1258 | &req->r_osd->o_linger_requests); |
a40c4f10 YS |
1259 | } |
1260 | ||
1261 | static void __unregister_linger_request(struct ceph_osd_client *osdc, | |
1262 | struct ceph_osd_request *req) | |
1263 | { | |
af593064 ID |
1264 | WARN_ON(!req->r_linger); |
1265 | ||
1266 | if (list_empty(&req->r_linger_item)) { | |
1267 | dout("%s %p tid %llu not registered\n", __func__, req, | |
1268 | req->r_tid); | |
1269 | return; | |
1270 | } | |
1271 | ||
1272 | dout("%s %p tid %llu\n", __func__, req, req->r_tid); | |
61c74035 | 1273 | list_del_init(&req->r_linger_item); |
af593064 | 1274 | |
a40c4f10 | 1275 | if (req->r_osd) { |
1d0326b1 | 1276 | list_del_init(&req->r_linger_osd_item); |
bbf37ec3 | 1277 | maybe_move_osd_to_lru(osdc, req->r_osd); |
fbdb9190 SW |
1278 | if (list_empty(&req->r_osd_item)) |
1279 | req->r_osd = NULL; | |
a40c4f10 | 1280 | } |
96e4dac6 | 1281 | ceph_osdc_put_request(req); |
a40c4f10 YS |
1282 | } |
1283 | ||
a40c4f10 YS |
1284 | void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, |
1285 | struct ceph_osd_request *req) | |
1286 | { | |
1287 | if (!req->r_linger) { | |
1288 | dout("set_request_linger %p\n", req); | |
1289 | req->r_linger = 1; | |
a40c4f10 YS |
1290 | } |
1291 | } | |
1292 | EXPORT_SYMBOL(ceph_osdc_set_request_linger); | |
1293 | ||
d29adb34 JD |
1294 | /* |
1295 | * Returns whether a request should be blocked from being sent | |
1296 | * based on the current osdmap and osd_client settings. | |
1297 | * | |
1298 | * Caller should hold map_sem for read. | |
1299 | */ | |
1300 | static bool __req_should_be_paused(struct ceph_osd_client *osdc, | |
1301 | struct ceph_osd_request *req) | |
1302 | { | |
1303 | bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); | |
1304 | bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || | |
1305 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); | |
1306 | return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) || | |
1307 | (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr); | |
1308 | } | |
1309 | ||
17a13e40 ID |
1310 | /* |
1311 | * Calculate mapping of a request to a PG. Takes tiering into account. | |
1312 | */ | |
1313 | static int __calc_request_pg(struct ceph_osdmap *osdmap, | |
1314 | struct ceph_osd_request *req, | |
1315 | struct ceph_pg *pg_out) | |
1316 | { | |
205ee118 ID |
1317 | bool need_check_tiering; |
1318 | ||
1319 | need_check_tiering = false; | |
1320 | if (req->r_target_oloc.pool == -1) { | |
1321 | req->r_target_oloc = req->r_base_oloc; /* struct */ | |
1322 | need_check_tiering = true; | |
1323 | } | |
1324 | if (req->r_target_oid.name_len == 0) { | |
1325 | ceph_oid_copy(&req->r_target_oid, &req->r_base_oid); | |
1326 | need_check_tiering = true; | |
1327 | } | |
1328 | ||
1329 | if (need_check_tiering && | |
1330 | (req->r_flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) { | |
17a13e40 ID |
1331 | struct ceph_pg_pool_info *pi; |
1332 | ||
205ee118 | 1333 | pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool); |
17a13e40 ID |
1334 | if (pi) { |
1335 | if ((req->r_flags & CEPH_OSD_FLAG_READ) && | |
1336 | pi->read_tier >= 0) | |
205ee118 | 1337 | req->r_target_oloc.pool = pi->read_tier; |
17a13e40 ID |
1338 | if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && |
1339 | pi->write_tier >= 0) | |
205ee118 | 1340 | req->r_target_oloc.pool = pi->write_tier; |
17a13e40 ID |
1341 | } |
1342 | /* !pi is caught in ceph_oloc_oid_to_pg() */ | |
1343 | } | |
1344 | ||
205ee118 ID |
1345 | return ceph_oloc_oid_to_pg(osdmap, &req->r_target_oloc, |
1346 | &req->r_target_oid, pg_out); | |
17a13e40 ID |
1347 | } |
1348 | ||
f671b581 ID |
1349 | static void __enqueue_request(struct ceph_osd_request *req) |
1350 | { | |
1351 | struct ceph_osd_client *osdc = req->r_osdc; | |
1352 | ||
1353 | dout("%s %p tid %llu to osd%d\n", __func__, req, req->r_tid, | |
1354 | req->r_osd ? req->r_osd->o_osd : -1); | |
1355 | ||
1356 | if (req->r_osd) { | |
1357 | __remove_osd_from_lru(req->r_osd); | |
1358 | list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); | |
1359 | list_move_tail(&req->r_req_lru_item, &osdc->req_unsent); | |
1360 | } else { | |
1361 | list_move_tail(&req->r_req_lru_item, &osdc->req_notarget); | |
1362 | } | |
1363 | } | |
1364 | ||
f24e9980 SW |
1365 | /* |
1366 | * Pick an osd (the first 'up' osd in the pg), allocate the osd struct | |
1367 | * (as needed), and set the request r_osd appropriately. If there is | |
25985edc | 1368 | * no up osd, set r_osd to NULL. Move the request to the appropriate list |
6f6c7006 | 1369 | * (unsent, homeless) or leave on in-flight lru. |
f24e9980 SW |
1370 | * |
1371 | * Return 0 if unchanged, 1 if changed, or negative on error. | |
1372 | * | |
1373 | * Caller should hold map_sem for read and request_mutex. | |
1374 | */ | |
6f6c7006 | 1375 | static int __map_request(struct ceph_osd_client *osdc, |
38d6453c | 1376 | struct ceph_osd_request *req, int force_resend) |
f24e9980 | 1377 | { |
5b191d99 | 1378 | struct ceph_pg pgid; |
d85b7056 | 1379 | int acting[CEPH_PG_MAX_SIZE]; |
8008ab10 | 1380 | int num, o; |
f24e9980 | 1381 | int err; |
d29adb34 | 1382 | bool was_paused; |
f24e9980 | 1383 | |
6f6c7006 | 1384 | dout("map_request %p tid %lld\n", req, req->r_tid); |
17a13e40 ID |
1385 | |
1386 | err = __calc_request_pg(osdc->osdmap, req, &pgid); | |
6f6c7006 SW |
1387 | if (err) { |
1388 | list_move(&req->r_req_lru_item, &osdc->req_notarget); | |
f24e9980 | 1389 | return err; |
6f6c7006 | 1390 | } |
7740a42f SW |
1391 | req->r_pgid = pgid; |
1392 | ||
8008ab10 ID |
1393 | num = ceph_calc_pg_acting(osdc->osdmap, pgid, acting, &o); |
1394 | if (num < 0) | |
1395 | num = 0; | |
f24e9980 | 1396 | |
d29adb34 JD |
1397 | was_paused = req->r_paused; |
1398 | req->r_paused = __req_should_be_paused(osdc, req); | |
1399 | if (was_paused && !req->r_paused) | |
1400 | force_resend = 1; | |
1401 | ||
38d6453c SW |
1402 | if ((!force_resend && |
1403 | req->r_osd && req->r_osd->o_osd == o && | |
d85b7056 SW |
1404 | req->r_sent >= req->r_osd->o_incarnation && |
1405 | req->r_num_pg_osds == num && | |
1406 | memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) || | |
d29adb34 JD |
1407 | (req->r_osd == NULL && o == -1) || |
1408 | req->r_paused) | |
f24e9980 SW |
1409 | return 0; /* no change */ |
1410 | ||
5b191d99 SW |
1411 | dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n", |
1412 | req->r_tid, pgid.pool, pgid.seed, o, | |
f24e9980 SW |
1413 | req->r_osd ? req->r_osd->o_osd : -1); |
1414 | ||
d85b7056 SW |
1415 | /* record full pg acting set */ |
1416 | memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num); | |
1417 | req->r_num_pg_osds = num; | |
1418 | ||
f24e9980 SW |
1419 | if (req->r_osd) { |
1420 | __cancel_request(req); | |
1421 | list_del_init(&req->r_osd_item); | |
f24e9980 SW |
1422 | req->r_osd = NULL; |
1423 | } | |
1424 | ||
1425 | req->r_osd = __lookup_osd(osdc, o); | |
1426 | if (!req->r_osd && o >= 0) { | |
c99eb1c7 | 1427 | err = -ENOMEM; |
e10006f8 | 1428 | req->r_osd = create_osd(osdc, o); |
6f6c7006 SW |
1429 | if (!req->r_osd) { |
1430 | list_move(&req->r_req_lru_item, &osdc->req_notarget); | |
c99eb1c7 | 1431 | goto out; |
6f6c7006 | 1432 | } |
f24e9980 | 1433 | |
6f6c7006 | 1434 | dout("map_request osd %p is osd%d\n", req->r_osd, o); |
f24e9980 SW |
1435 | __insert_osd(osdc, req->r_osd); |
1436 | ||
b7a9e5dd SW |
1437 | ceph_con_open(&req->r_osd->o_con, |
1438 | CEPH_ENTITY_TYPE_OSD, o, | |
1439 | &osdc->osdmap->osd_addr[o]); | |
f24e9980 SW |
1440 | } |
1441 | ||
f671b581 | 1442 | __enqueue_request(req); |
d85b7056 | 1443 | err = 1; /* osd or pg changed */ |
f24e9980 SW |
1444 | |
1445 | out: | |
f24e9980 SW |
1446 | return err; |
1447 | } | |
1448 | ||
1449 | /* | |
1450 | * caller should hold map_sem (for read) and request_mutex | |
1451 | */ | |
56e925b6 SW |
1452 | static void __send_request(struct ceph_osd_client *osdc, |
1453 | struct ceph_osd_request *req) | |
f24e9980 | 1454 | { |
1b83bef2 | 1455 | void *p; |
f24e9980 | 1456 | |
1b83bef2 SW |
1457 | dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n", |
1458 | req, req->r_tid, req->r_osd->o_osd, req->r_flags, | |
1459 | (unsigned long long)req->r_pgid.pool, req->r_pgid.seed); | |
1460 | ||
1461 | /* fill in message content that changes each time we send it */ | |
1462 | put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch); | |
1463 | put_unaligned_le32(req->r_flags, req->r_request_flags); | |
205ee118 | 1464 | put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool); |
1b83bef2 SW |
1465 | p = req->r_request_pgid; |
1466 | ceph_encode_64(&p, req->r_pgid.pool); | |
1467 | ceph_encode_32(&p, req->r_pgid.seed); | |
1468 | put_unaligned_le64(1, req->r_request_attempts); /* FIXME */ | |
1469 | memcpy(req->r_request_reassert_version, &req->r_reassert_version, | |
1470 | sizeof(req->r_reassert_version)); | |
2169aea6 | 1471 | |
3dd72fc0 | 1472 | req->r_stamp = jiffies; |
07a27e22 | 1473 | list_move_tail(&req->r_req_lru_item, &osdc->req_lru); |
f24e9980 SW |
1474 | |
1475 | ceph_msg_get(req->r_request); /* send consumes a ref */ | |
26be8808 | 1476 | |
f24e9980 | 1477 | req->r_sent = req->r_osd->o_incarnation; |
26be8808 AE |
1478 | |
1479 | ceph_con_send(&req->r_osd->o_con, req->r_request); | |
f24e9980 SW |
1480 | } |
1481 | ||
6f6c7006 SW |
1482 | /* |
1483 | * Send any requests in the queue (req_unsent). | |
1484 | */ | |
f9d25199 | 1485 | static void __send_queued(struct ceph_osd_client *osdc) |
6f6c7006 SW |
1486 | { |
1487 | struct ceph_osd_request *req, *tmp; | |
1488 | ||
f9d25199 AE |
1489 | dout("__send_queued\n"); |
1490 | list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) | |
6f6c7006 | 1491 | __send_request(osdc, req); |
6f6c7006 SW |
1492 | } |
1493 | ||
0bbfdfe8 ID |
1494 | /* |
1495 | * Caller should hold map_sem for read and request_mutex. | |
1496 | */ | |
1497 | static int __ceph_osdc_start_request(struct ceph_osd_client *osdc, | |
1498 | struct ceph_osd_request *req, | |
1499 | bool nofail) | |
1500 | { | |
1501 | int rc; | |
1502 | ||
1503 | __register_request(osdc, req); | |
1504 | req->r_sent = 0; | |
1505 | req->r_got_reply = 0; | |
1506 | rc = __map_request(osdc, req, 0); | |
1507 | if (rc < 0) { | |
1508 | if (nofail) { | |
1509 | dout("osdc_start_request failed map, " | |
1510 | " will retry %lld\n", req->r_tid); | |
1511 | rc = 0; | |
1512 | } else { | |
1513 | __unregister_request(osdc, req); | |
1514 | } | |
1515 | return rc; | |
1516 | } | |
1517 | ||
1518 | if (req->r_osd == NULL) { | |
1519 | dout("send_request %p no up osds in pg\n", req); | |
1520 | ceph_monc_request_next_osdmap(&osdc->client->monc); | |
1521 | } else { | |
1522 | __send_queued(osdc); | |
1523 | } | |
1524 | ||
1525 | return 0; | |
1526 | } | |
1527 | ||
f24e9980 SW |
1528 | /* |
1529 | * Timeout callback, called every N seconds when 1 or more osd | |
1530 | * requests have been active for more than N seconds. When this | 
1531 | * happens, we ping all OSDs with requests that have timed out to | 
1532 | * ensure any communications channel reset is detected. Reset the | |
1533 | * request timeouts another N seconds in the future as we go. | |
1534 | * Reschedule the timeout event another N seconds in future (unless | |
1535 | * there are no open requests). | |
1536 | */ | |
1537 | static void handle_timeout(struct work_struct *work) | |
1538 | { | |
1539 | struct ceph_osd_client *osdc = | |
1540 | container_of(work, struct ceph_osd_client, timeout_work.work); | |
83aff95e | 1541 | struct ceph_osd_request *req; |
f24e9980 | 1542 | struct ceph_osd *osd; |
422d2cb8 | 1543 | unsigned long keepalive = |
3d14c5d2 | 1544 | osdc->client->options->osd_keepalive_timeout * HZ; |
422d2cb8 | 1545 | struct list_head slow_osds; |
f24e9980 SW |
1546 | dout("timeout\n"); |
1547 | down_read(&osdc->map_sem); | |
1548 | ||
1549 | ceph_monc_request_next_osdmap(&osdc->client->monc); | |
1550 | ||
1551 | mutex_lock(&osdc->request_mutex); | |
f24e9980 | 1552 | |
422d2cb8 YS |
1553 | /* |
1554 | * ping osds that are a bit slow. this ensures that if there | |
1555 | * is a break in the TCP connection we will notice, and reopen | |
1556 | * a connection with that osd (from the fault callback). | |
1557 | */ | |
1558 | INIT_LIST_HEAD(&slow_osds); | |
1559 | list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { | |
3dd72fc0 | 1560 | if (time_before(jiffies, req->r_stamp + keepalive)) |
422d2cb8 YS |
1561 | break; |
1562 | ||
1563 | osd = req->r_osd; | |
1564 | BUG_ON(!osd); | |
1565 | dout(" tid %llu is slow, will send keepalive on osd%d\n", | |
f24e9980 | 1566 | req->r_tid, osd->o_osd); |
422d2cb8 YS |
1567 | list_move_tail(&osd->o_keepalive_item, &slow_osds); |
1568 | } | |
1569 | while (!list_empty(&slow_osds)) { | |
1570 | osd = list_entry(slow_osds.next, struct ceph_osd, | |
1571 | o_keepalive_item); | |
1572 | list_del_init(&osd->o_keepalive_item); | |
f24e9980 SW |
1573 | ceph_con_keepalive(&osd->o_con); |
1574 | } | |
1575 | ||
422d2cb8 | 1576 | __schedule_osd_timeout(osdc); |
f9d25199 | 1577 | __send_queued(osdc); |
f24e9980 | 1578 | mutex_unlock(&osdc->request_mutex); |
f24e9980 SW |
1579 | up_read(&osdc->map_sem); |
1580 | } | |
1581 | ||
f5a2041b YS |
1582 | static void handle_osds_timeout(struct work_struct *work) |
1583 | { | |
1584 | struct ceph_osd_client *osdc = | |
1585 | container_of(work, struct ceph_osd_client, | |
1586 | osds_timeout_work.work); | |
1587 | unsigned long delay = | |
3d14c5d2 | 1588 | osdc->client->options->osd_idle_ttl * HZ >> 2; |
f5a2041b YS |
1589 | |
1590 | dout("osds timeout\n"); | |
1591 | down_read(&osdc->map_sem); | |
aca420bc | 1592 | remove_old_osds(osdc); |
f5a2041b YS |
1593 | up_read(&osdc->map_sem); |
1594 | ||
1595 | schedule_delayed_work(&osdc->osds_timeout_work, | |
1596 | round_jiffies_relative(delay)); | |
1597 | } | |
1598 | ||
205ee118 ID |
1599 | static int ceph_oloc_decode(void **p, void *end, |
1600 | struct ceph_object_locator *oloc) | |
1601 | { | |
1602 | u8 struct_v, struct_cv; | |
1603 | u32 len; | |
1604 | void *struct_end; | |
1605 | int ret = 0; | |
1606 | ||
1607 | ceph_decode_need(p, end, 1 + 1 + 4, e_inval); | |
1608 | struct_v = ceph_decode_8(p); | |
1609 | struct_cv = ceph_decode_8(p); | |
1610 | if (struct_v < 3) { | |
1611 | pr_warn("got v %d < 3 cv %d of ceph_object_locator\n", | |
1612 | struct_v, struct_cv); | |
1613 | goto e_inval; | |
1614 | } | |
1615 | if (struct_cv > 6) { | |
1616 | pr_warn("got v %d cv %d > 6 of ceph_object_locator\n", | |
1617 | struct_v, struct_cv); | |
1618 | goto e_inval; | |
1619 | } | |
1620 | len = ceph_decode_32(p); | |
1621 | ceph_decode_need(p, end, len, e_inval); | |
1622 | struct_end = *p + len; | |
1623 | ||
1624 | oloc->pool = ceph_decode_64(p); | |
1625 | *p += 4; /* skip preferred */ | |
1626 | ||
1627 | len = ceph_decode_32(p); | |
1628 | if (len > 0) { | |
1629 | pr_warn("ceph_object_locator::key is set\n"); | |
1630 | goto e_inval; | |
1631 | } | |
1632 | ||
1633 | if (struct_v >= 5) { | |
1634 | len = ceph_decode_32(p); | |
1635 | if (len > 0) { | |
1636 | pr_warn("ceph_object_locator::nspace is set\n"); | |
1637 | goto e_inval; | |
1638 | } | |
1639 | } | |
1640 | ||
1641 | if (struct_v >= 6) { | |
1642 | s64 hash = ceph_decode_64(p); | |
1643 | if (hash != -1) { | |
1644 | pr_warn("ceph_object_locator::hash is set\n"); | |
1645 | goto e_inval; | |
1646 | } | |
1647 | } | |
1648 | ||
1649 | /* skip the rest */ | |
1650 | *p = struct_end; | |
1651 | out: | |
1652 | return ret; | |
1653 | ||
1654 | e_inval: | |
1655 | ret = -EINVAL; | |
1656 | goto out; | |
1657 | } | |
1658 | ||
1659 | static int ceph_redirect_decode(void **p, void *end, | |
1660 | struct ceph_request_redirect *redir) | |
1661 | { | |
1662 | u8 struct_v, struct_cv; | |
1663 | u32 len; | |
1664 | void *struct_end; | |
1665 | int ret; | |
1666 | ||
1667 | ceph_decode_need(p, end, 1 + 1 + 4, e_inval); | |
1668 | struct_v = ceph_decode_8(p); | |
1669 | struct_cv = ceph_decode_8(p); | |
1670 | if (struct_cv > 1) { | |
1671 | pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n", | |
1672 | struct_v, struct_cv); | |
1673 | goto e_inval; | |
1674 | } | |
1675 | len = ceph_decode_32(p); | |
1676 | ceph_decode_need(p, end, len, e_inval); | |
1677 | struct_end = *p + len; | |
1678 | ||
1679 | ret = ceph_oloc_decode(p, end, &redir->oloc); | |
1680 | if (ret) | |
1681 | goto out; | |
1682 | ||
1683 | len = ceph_decode_32(p); | |
1684 | if (len > 0) { | |
1685 | pr_warn("ceph_request_redirect::object_name is set\n"); | |
1686 | goto e_inval; | |
1687 | } | |
1688 | ||
1689 | len = ceph_decode_32(p); | |
1690 | *p += len; /* skip osd_instructions */ | |
1691 | ||
1692 | /* skip the rest */ | |
1693 | *p = struct_end; | |
1694 | out: | |
1695 | return ret; | |
1696 | ||
1697 | e_inval: | |
1698 | ret = -EINVAL; | |
1699 | goto out; | |
1700 | } | |
1701 | ||
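/*
 * Editor's note: both decoders above follow the generic versioned-struct
 * convention used across the Ceph wire format: an 8-bit struct version,
 * an 8-bit compat version, a 32-bit payload length, then the payload,
 * with *p finally advanced to struct_end so fields appended by newer
 * encoders are skipped.  The sketch below illustrates that pattern for a
 * hypothetical two-field struct ("my_thing" is not a real Ceph type); it
 * only uses the ceph_decode_* helpers already used above.
 */
#include <linux/ceph/decode.h>

struct my_thing {			/* hypothetical, for illustration */
	u64 id;
	u32 flags;
};

static int my_thing_decode(void **p, void *end, struct my_thing *t)
{
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;

	/* fixed header: version, compat version, payload length */
	ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
	struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 1 || struct_cv > 1)
		goto e_inval;		/* encoded by an incompatible writer */
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, e_inval);
	struct_end = *p + len;

	/* the payload fields we understand */
	ceph_decode_need(p, end, 8 + 4, e_inval);
	t->id = ceph_decode_64(p);
	t->flags = ceph_decode_32(p);

	/* skip anything added by newer struct versions */
	*p = struct_end;
	return 0;

e_inval:
	return -EINVAL;
}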
25845472 SW |
1702 | static void complete_request(struct ceph_osd_request *req) |
1703 | { | |
25845472 SW |
1704 | complete_all(&req->r_safe_completion); /* fsync waiter */ |
1705 | } | |
1706 | ||
f24e9980 SW |
1707 | /* |
1708 | * handle osd op reply. either call the callback if it is specified, | |
1709 | * or do the completion to wake up the waiting thread. | |
1710 | */ | |
350b1c32 SW |
1711 | static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, |
1712 | struct ceph_connection *con) | |
f24e9980 | 1713 | { |
1b83bef2 | 1714 | void *p, *end; |
f24e9980 | 1715 | struct ceph_osd_request *req; |
205ee118 | 1716 | struct ceph_request_redirect redir; |
f24e9980 | 1717 | u64 tid; |
1b83bef2 | 1718 | int object_len; |
79528734 AE |
1719 | unsigned int numops; |
1720 | int payload_len, flags; | |
0ceed5db | 1721 | s32 result; |
1b83bef2 SW |
1722 | s32 retry_attempt; |
1723 | struct ceph_pg pg; | |
1724 | int err; | |
1725 | u32 reassert_epoch; | |
1726 | u64 reassert_version; | |
1727 | u32 osdmap_epoch; | |
0d5af164 | 1728 | int already_completed; |
9fc6e064 | 1729 | u32 bytes; |
79528734 | 1730 | unsigned int i; |
f24e9980 | 1731 | |
6df058c0 | 1732 | tid = le64_to_cpu(msg->hdr.tid); |
1b83bef2 SW |
1733 | dout("handle_reply %p tid %llu\n", msg, tid); |
1734 | ||
1735 | p = msg->front.iov_base; | |
1736 | end = p + msg->front.iov_len; | |
1737 | ||
1738 | ceph_decode_need(&p, end, 4, bad); | |
1739 | object_len = ceph_decode_32(&p); | |
1740 | ceph_decode_need(&p, end, object_len, bad); | |
1741 | p += object_len; | |
1742 | ||
ef4859d6 | 1743 | err = ceph_decode_pgid(&p, end, &pg); |
1b83bef2 | 1744 | if (err) |
f24e9980 | 1745 | goto bad; |
1b83bef2 SW |
1746 | |
1747 | ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad); | |
1748 | flags = ceph_decode_64(&p); | |
1749 | result = ceph_decode_32(&p); | |
1750 | reassert_epoch = ceph_decode_32(&p); | |
1751 | reassert_version = ceph_decode_64(&p); | |
1752 | osdmap_epoch = ceph_decode_32(&p); | |
1753 | ||
f24e9980 | 1754 | /* lookup */ |
ff513ace | 1755 | down_read(&osdc->map_sem); |
f24e9980 SW |
1756 | mutex_lock(&osdc->request_mutex); |
1757 | req = __lookup_request(osdc, tid); | |
1758 | if (req == NULL) { | |
1759 | dout("handle_reply tid %llu dne\n", tid); | |
8058fd45 | 1760 | goto bad_mutex; |
f24e9980 SW |
1761 | } |
1762 | ceph_osdc_get_request(req); | |
1b83bef2 SW |
1763 | |
1764 | dout("handle_reply %p tid %llu req %p result %d\n", msg, tid, | |
1765 | req, result); | |
1766 | ||
18741196 | 1767 | ceph_decode_need(&p, end, 4, bad_put); |
1b83bef2 SW |
1768 | numops = ceph_decode_32(&p); |
1769 | if (numops > CEPH_OSD_MAX_OP) | |
1770 | goto bad_put; | |
1771 | if (numops != req->r_num_ops) | |
1772 | goto bad_put; | |
1773 | payload_len = 0; | |
18741196 | 1774 | ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put); |
1b83bef2 SW |
1775 | for (i = 0; i < numops; i++) { |
1776 | struct ceph_osd_op *op = p; | |
1777 | int len; | |
1778 | ||
1779 | len = le32_to_cpu(op->payload_len); | |
1780 | req->r_reply_op_len[i] = len; | |
1781 | dout(" op %d has %d bytes\n", i, len); | |
1782 | payload_len += len; | |
1783 | p += sizeof(*op); | |
1784 | } | |
9fc6e064 AE |
1785 | bytes = le32_to_cpu(msg->hdr.data_len); |
1786 | if (payload_len != bytes) { | |
1b83bef2 | 1787 | pr_warning("sum of op payload lens %d != data_len %d\n", | 
9fc6e064 | 1788 | payload_len, bytes); |
1b83bef2 SW |
1789 | goto bad_put; |
1790 | } | |
1791 | ||
18741196 | 1792 | ceph_decode_need(&p, end, 4 + numops * 4, bad_put); |
1b83bef2 SW |
1793 | retry_attempt = ceph_decode_32(&p); |
1794 | for (i = 0; i < numops; i++) | |
1795 | req->r_reply_op_result[i] = ceph_decode_32(&p); | |
f24e9980 | 1796 | |
205ee118 ID |
1797 | if (le16_to_cpu(msg->hdr.version) >= 6) { |
1798 | p += 8 + 4; /* skip replay_version */ | |
1799 | p += 8; /* skip user_version */ | |
eb845ff1 | 1800 | |
205ee118 ID |
1801 | err = ceph_redirect_decode(&p, end, &redir); |
1802 | if (err) | |
1803 | goto bad_put; | |
1804 | } else { | |
1805 | redir.oloc.pool = -1; | |
1806 | } | |
f24e9980 | 1807 | |
205ee118 ID |
1808 | if (redir.oloc.pool != -1) { |
1809 | dout("redirect pool %lld\n", redir.oloc.pool); | |
1810 | ||
1811 | __unregister_request(osdc, req); | |
205ee118 ID |
1812 | |
1813 | req->r_target_oloc = redir.oloc; /* struct */ | |
1814 | ||
1815 | /* | |
1816 | * Start redirect requests with nofail=true. If | |
1817 | * mapping fails, request will end up on the notarget | |
1818 | * list, waiting for the new osdmap (which can take | |
1819 | * a while), even though the original request mapped | |
1820 | * successfully. In the future we might want to follow | |
1821 | * original request's nofail setting here. | |
1822 | */ | |
ff513ace | 1823 | err = __ceph_osdc_start_request(osdc, req, true); |
205ee118 ID |
1824 | BUG_ON(err); |
1825 | ||
ff513ace | 1826 | goto out_unlock; |
205ee118 ID |
1827 | } |
1828 | ||
1829 | already_completed = req->r_got_reply; | |
1830 | if (!req->r_got_reply) { | |
1b83bef2 | 1831 | req->r_result = result; |
f24e9980 SW |
1832 | dout("handle_reply result %d bytes %d\n", req->r_result, |
1833 | bytes); | |
1834 | if (req->r_result == 0) | |
1835 | req->r_result = bytes; | |
1836 | ||
1837 | /* in case this is a write and we need to replay, */ | |
1b83bef2 SW |
1838 | req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch); |
1839 | req->r_reassert_version.version = cpu_to_le64(reassert_version); | |
f24e9980 SW |
1840 | |
1841 | req->r_got_reply = 1; | |
1842 | } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { | |
1843 | dout("handle_reply tid %llu dup ack\n", tid); | |
ff513ace | 1844 | goto out_unlock; |
f24e9980 SW |
1845 | } |
1846 | ||
1847 | dout("handle_reply tid %llu flags %d\n", tid, flags); | |
1848 | ||
a40c4f10 YS |
1849 | if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK)) |
1850 | __register_linger_request(osdc, req); | |
1851 | ||
f24e9980 | 1852 | /* either this is a read, or we got the safe response */ |
0ceed5db SW |
1853 | if (result < 0 || |
1854 | (flags & CEPH_OSD_FLAG_ONDISK) || | |
f24e9980 SW |
1855 | ((flags & CEPH_OSD_FLAG_WRITE) == 0)) |
1856 | __unregister_request(osdc, req); | |
1857 | ||
1858 | mutex_unlock(&osdc->request_mutex); | |
ff513ace | 1859 | up_read(&osdc->map_sem); |
f24e9980 | 1860 | |
eb845ff1 | 1861 | if (!already_completed) { |
61c5d6bf YZ |
1862 | if (req->r_unsafe_callback && |
1863 | result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK)) | |
1864 | req->r_unsafe_callback(req, true); | |
eb845ff1 YZ |
1865 | if (req->r_callback) |
1866 | req->r_callback(req, msg); | |
1867 | else | |
1868 | complete_all(&req->r_completion); | |
1869 | } | |
f24e9980 | 1870 | |
61c5d6bf YZ |
1871 | if (flags & CEPH_OSD_FLAG_ONDISK) { |
1872 | if (req->r_unsafe_callback && already_completed) | |
1873 | req->r_unsafe_callback(req, false); | |
25845472 | 1874 | complete_request(req); |
61c5d6bf | 1875 | } |
f24e9980 | 1876 | |
ff513ace | 1877 | out: |
a40c4f10 | 1878 | dout("req=%p req->r_linger=%d\n", req, req->r_linger); |
f24e9980 SW |
1879 | ceph_osdc_put_request(req); |
1880 | return; | |
ff513ace ID |
1881 | out_unlock: |
1882 | mutex_unlock(&osdc->request_mutex); | |
1883 | up_read(&osdc->map_sem); | |
1884 | goto out; | |
f24e9980 | 1885 | |
1b83bef2 | 1886 | bad_put: |
37c89bde LW |
1887 | req->r_result = -EIO; |
1888 | __unregister_request(osdc, req); | |
1889 | if (req->r_callback) | |
1890 | req->r_callback(req, msg); | |
1891 | else | |
1892 | complete_all(&req->r_completion); | |
1893 | complete_request(req); | |
1b83bef2 | 1894 | ceph_osdc_put_request(req); |
8058fd45 AE |
1895 | bad_mutex: |
1896 | mutex_unlock(&osdc->request_mutex); | |
ff513ace | 1897 | up_read(&osdc->map_sem); |
f24e9980 | 1898 | bad: |
1b83bef2 SW |
1899 | pr_err("corrupt osd_op_reply got %d %d\n", |
1900 | (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); | |
9ec7cab1 | 1901 | ceph_msg_dump(msg); |
f24e9980 SW |
1902 | } |
1903 | ||
6f6c7006 | 1904 | static void reset_changed_osds(struct ceph_osd_client *osdc) |
f24e9980 | 1905 | { |
f24e9980 | 1906 | struct rb_node *p, *n; |
f24e9980 | 1907 | |
6f6c7006 SW |
1908 | for (p = rb_first(&osdc->osds); p; p = n) { |
1909 | struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); | |
f24e9980 | 1910 | |
6f6c7006 SW |
1911 | n = rb_next(p); |
1912 | if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || | |
1913 | memcmp(&osd->o_con.peer_addr, | |
1914 | ceph_osd_addr(osdc->osdmap, | |
1915 | osd->o_osd), | |
1916 | sizeof(struct ceph_entity_addr)) != 0) | |
1917 | __reset_osd(osdc, osd); | |
f24e9980 | 1918 | } |
422d2cb8 YS |
1919 | } |
1920 | ||
1921 | /* | |
6f6c7006 SW |
1922 | * Requeue requests whose mapping to an OSD has changed. If requests map to |
1923 | * no osd, request a new map. | |
422d2cb8 | 1924 | * |
e6d50f67 | 1925 | * Caller should hold map_sem for read. |
422d2cb8 | 1926 | */ |
9a1ea2db JD |
1927 | static void kick_requests(struct ceph_osd_client *osdc, bool force_resend, |
1928 | bool force_resend_writes) | |
422d2cb8 | 1929 | { |
a40c4f10 | 1930 | struct ceph_osd_request *req, *nreq; |
6f6c7006 SW |
1931 | struct rb_node *p; |
1932 | int needmap = 0; | |
1933 | int err; | |
9a1ea2db | 1934 | bool force_resend_req; |
422d2cb8 | 1935 | |
9a1ea2db JD |
1936 | dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "", |
1937 | force_resend_writes ? " (force resend writes)" : ""); | |
422d2cb8 | 1938 | mutex_lock(&osdc->request_mutex); |
6194ea89 | 1939 | for (p = rb_first(&osdc->requests); p; ) { |
6f6c7006 | 1940 | req = rb_entry(p, struct ceph_osd_request, r_node); |
6194ea89 | 1941 | p = rb_next(p); |
ab60b16d AE |
1942 | |
1943 | /* | |
1944 | * For linger requests that have not yet been | |
1945 | * registered, move them to the linger list; they'll | |
1946 | * be sent to the osd in the loop below. Unregister | |
1947 | * the request before re-registering it as a linger | |
1948 | * request to ensure the __map_request() below | |
1949 | * will decide it needs to be sent. | |
1950 | */ | |
1951 | if (req->r_linger && list_empty(&req->r_linger_item)) { | |
1952 | dout("%p tid %llu restart on osd%d\n", | |
1953 | req, req->r_tid, | |
1954 | req->r_osd ? req->r_osd->o_osd : -1); | |
96e4dac6 | 1955 | ceph_osdc_get_request(req); |
ab60b16d AE |
1956 | __unregister_request(osdc, req); |
1957 | __register_linger_request(osdc, req); | |
96e4dac6 | 1958 | ceph_osdc_put_request(req); |
ab60b16d AE |
1959 | continue; |
1960 | } | |
1961 | ||
9a1ea2db JD |
1962 | force_resend_req = force_resend || |
1963 | (force_resend_writes && | |
1964 | req->r_flags & CEPH_OSD_FLAG_WRITE); | |
1965 | err = __map_request(osdc, req, force_resend_req); | |
6f6c7006 SW |
1966 | if (err < 0) |
1967 | continue; /* error */ | |
1968 | if (req->r_osd == NULL) { | |
1969 | dout("%p tid %llu maps to no osd\n", req, req->r_tid); | |
1970 | needmap++; /* request a newer map */ | |
1971 | } else if (err > 0) { | |
6194ea89 SW |
1972 | if (!req->r_linger) { |
1973 | dout("%p tid %llu requeued on osd%d\n", req, | |
1974 | req->r_tid, | |
1975 | req->r_osd ? req->r_osd->o_osd : -1); | |
a40c4f10 | 1976 | req->r_flags |= CEPH_OSD_FLAG_RETRY; |
6194ea89 SW |
1977 | } |
1978 | } | |
a40c4f10 YS |
1979 | } |
1980 | ||
1981 | list_for_each_entry_safe(req, nreq, &osdc->req_linger, | |
1982 | r_linger_item) { | |
1983 | dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); | |
1984 | ||
9a1ea2db JD |
1985 | err = __map_request(osdc, req, |
1986 | force_resend || force_resend_writes); | |
ab60b16d | 1987 | dout("__map_request returned %d\n", err); |
a40c4f10 YS |
1988 | if (err == 0) |
1989 | continue; /* no change and no osd was specified */ | |
1990 | if (err < 0) | |
1991 | continue; /* hrm! */ | |
1992 | if (req->r_osd == NULL) { | |
1993 | dout("tid %llu maps to no valid osd\n", req->r_tid); | |
1994 | needmap++; /* request a newer map */ | |
1995 | continue; | |
6f6c7006 | 1996 | } |
a40c4f10 YS |
1997 | |
1998 | dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, | |
1999 | req->r_osd ? req->r_osd->o_osd : -1); | |
a40c4f10 | 2000 | __register_request(osdc, req); |
c89ce05e | 2001 | __unregister_linger_request(osdc, req); |
6f6c7006 | 2002 | } |
14d2f38d | 2003 | reset_changed_osds(osdc); |
f24e9980 SW |
2004 | mutex_unlock(&osdc->request_mutex); |
2005 | ||
2006 | if (needmap) { | |
2007 | dout("%d requests for down osds, need new map\n", needmap); | |
2008 | ceph_monc_request_next_osdmap(&osdc->client->monc); | |
2009 | } | |
422d2cb8 | 2010 | } |
6f6c7006 SW |
2011 | |
2012 | ||
f24e9980 SW |
2013 | /* |
2014 | * Process updated osd map. | |
2015 | * | |
2016 | * The message contains any number of incremental and full maps, normally | |
2017 | * indicating some sort of topology change in the cluster. Kick requests | |
2018 | * off to different OSDs as needed. | |
2019 | */ | |
2020 | void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) | |
2021 | { | |
2022 | void *p, *end, *next; | |
2023 | u32 nr_maps, maplen; | |
2024 | u32 epoch; | |
2025 | struct ceph_osdmap *newmap = NULL, *oldmap; | |
2026 | int err; | |
2027 | struct ceph_fsid fsid; | |
9a1ea2db | 2028 | bool was_full; |
f24e9980 SW |
2029 | |
2030 | dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0); | |
2031 | p = msg->front.iov_base; | |
2032 | end = p + msg->front.iov_len; | |
2033 | ||
2034 | /* verify fsid */ | |
2035 | ceph_decode_need(&p, end, sizeof(fsid), bad); | |
2036 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
0743304d SW |
2037 | if (ceph_check_fsid(osdc->client, &fsid) < 0) |
2038 | return; | |
f24e9980 SW |
2039 | |
2040 | down_write(&osdc->map_sem); | |
2041 | ||
9a1ea2db JD |
2042 | was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); |
2043 | ||
f24e9980 SW |
2044 | /* incremental maps */ |
2045 | ceph_decode_32_safe(&p, end, nr_maps, bad); | |
2046 | dout(" %d inc maps\n", nr_maps); | |
2047 | while (nr_maps > 0) { | |
2048 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); | |
c89136ea SW |
2049 | epoch = ceph_decode_32(&p); |
2050 | maplen = ceph_decode_32(&p); | |
f24e9980 SW |
2051 | ceph_decode_need(&p, end, maplen, bad); |
2052 | next = p + maplen; | |
2053 | if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) { | |
2054 | dout("applying incremental map %u len %d\n", | |
2055 | epoch, maplen); | |
2056 | newmap = osdmap_apply_incremental(&p, next, | |
2057 | osdc->osdmap, | |
15d9882c | 2058 | &osdc->client->msgr); |
f24e9980 SW |
2059 | if (IS_ERR(newmap)) { |
2060 | err = PTR_ERR(newmap); | |
2061 | goto bad; | |
2062 | } | |
30dc6381 | 2063 | BUG_ON(!newmap); |
f24e9980 SW |
2064 | if (newmap != osdc->osdmap) { |
2065 | ceph_osdmap_destroy(osdc->osdmap); | |
2066 | osdc->osdmap = newmap; | |
2067 | } | |
9a1ea2db JD |
2068 | was_full = was_full || |
2069 | ceph_osdmap_flag(osdc->osdmap, | |
2070 | CEPH_OSDMAP_FULL); | |
2071 | kick_requests(osdc, 0, was_full); | |
f24e9980 SW |
2072 | } else { |
2073 | dout("ignoring incremental map %u len %d\n", | |
2074 | epoch, maplen); | |
2075 | } | |
2076 | p = next; | |
2077 | nr_maps--; | |
2078 | } | |
2079 | if (newmap) | |
2080 | goto done; | |
2081 | ||
2082 | /* full maps */ | |
2083 | ceph_decode_32_safe(&p, end, nr_maps, bad); | |
2084 | dout(" %d full maps\n", nr_maps); | |
2085 | while (nr_maps) { | |
2086 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); | |
c89136ea SW |
2087 | epoch = ceph_decode_32(&p); |
2088 | maplen = ceph_decode_32(&p); | |
f24e9980 SW |
2089 | ceph_decode_need(&p, end, maplen, bad); |
2090 | if (nr_maps > 1) { | |
2091 | dout("skipping non-latest full map %u len %d\n", | |
2092 | epoch, maplen); | |
2093 | } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) { | |
2094 | dout("skipping full map %u len %d, " | |
2095 | "older than our %u\n", epoch, maplen, | |
2096 | osdc->osdmap->epoch); | |
2097 | } else { | |
38d6453c SW |
2098 | int skipped_map = 0; |
2099 | ||
f24e9980 | 2100 | dout("taking full map %u len %d\n", epoch, maplen); |
a2505d63 | 2101 | newmap = ceph_osdmap_decode(&p, p+maplen); |
f24e9980 SW |
2102 | if (IS_ERR(newmap)) { |
2103 | err = PTR_ERR(newmap); | |
2104 | goto bad; | |
2105 | } | |
30dc6381 | 2106 | BUG_ON(!newmap); |
f24e9980 SW |
2107 | oldmap = osdc->osdmap; |
2108 | osdc->osdmap = newmap; | |
38d6453c SW |
2109 | if (oldmap) { |
2110 | if (oldmap->epoch + 1 < newmap->epoch) | |
2111 | skipped_map = 1; | |
f24e9980 | 2112 | ceph_osdmap_destroy(oldmap); |
38d6453c | 2113 | } |
9a1ea2db JD |
2114 | was_full = was_full || |
2115 | ceph_osdmap_flag(osdc->osdmap, | |
2116 | CEPH_OSDMAP_FULL); | |
2117 | kick_requests(osdc, skipped_map, was_full); | |
f24e9980 SW |
2118 | } |
2119 | p += maplen; | |
2120 | nr_maps--; | |
2121 | } | |
2122 | ||
b72e19b9 DC |
2123 | if (!osdc->osdmap) |
2124 | goto bad; | |
f24e9980 SW |
2125 | done: |
2126 | downgrade_write(&osdc->map_sem); | |
2127 | ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); | |
cd634fb6 SW |
2128 | |
2129 | /* | |
2130 | * subscribe to subsequent osdmap updates if full or paused, to | 
2131 | * ensure we find out when we are no longer full/paused and stop | 
2132 | * returning ENOSPC or holding requests back. | 
2133 | */ | |
d29adb34 JD |
2134 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || |
2135 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) || | |
2136 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) | |
cd634fb6 SW |
2137 | ceph_monc_request_next_osdmap(&osdc->client->monc); |
2138 | ||
f9d25199 AE |
2139 | mutex_lock(&osdc->request_mutex); |
2140 | __send_queued(osdc); | |
2141 | mutex_unlock(&osdc->request_mutex); | |
f24e9980 | 2142 | up_read(&osdc->map_sem); |
03066f23 | 2143 | wake_up_all(&osdc->client->auth_wq); |
f24e9980 SW |
2144 | return; |
2145 | ||
2146 | bad: | |
2147 | pr_err("osdc handle_map corrupt msg\n"); | |
9ec7cab1 | 2148 | ceph_msg_dump(msg); |
f24e9980 | 2149 | up_write(&osdc->map_sem); |
f24e9980 SW |
2150 | } |
2151 | ||
a40c4f10 YS |
2152 | /* |
2153 | * watch/notify callback event infrastructure | |
2154 | * | |
2155 | * These callbacks are used both for watch and notify operations. | |
2156 | */ | |
2157 | static void __release_event(struct kref *kref) | |
2158 | { | |
2159 | struct ceph_osd_event *event = | |
2160 | container_of(kref, struct ceph_osd_event, kref); | |
2161 | ||
2162 | dout("__release_event %p\n", event); | |
2163 | kfree(event); | |
2164 | } | |
2165 | ||
2166 | static void get_event(struct ceph_osd_event *event) | |
2167 | { | |
2168 | kref_get(&event->kref); | |
2169 | } | |
2170 | ||
2171 | void ceph_osdc_put_event(struct ceph_osd_event *event) | |
2172 | { | |
2173 | kref_put(&event->kref, __release_event); | |
2174 | } | |
2175 | EXPORT_SYMBOL(ceph_osdc_put_event); | |
2176 | ||
2177 | static void __insert_event(struct ceph_osd_client *osdc, | |
2178 | struct ceph_osd_event *new) | |
2179 | { | |
2180 | struct rb_node **p = &osdc->event_tree.rb_node; | |
2181 | struct rb_node *parent = NULL; | |
2182 | struct ceph_osd_event *event = NULL; | |
2183 | ||
2184 | while (*p) { | |
2185 | parent = *p; | |
2186 | event = rb_entry(parent, struct ceph_osd_event, node); | |
2187 | if (new->cookie < event->cookie) | |
2188 | p = &(*p)->rb_left; | |
2189 | else if (new->cookie > event->cookie) | |
2190 | p = &(*p)->rb_right; | |
2191 | else | |
2192 | BUG(); | |
2193 | } | |
2194 | ||
2195 | rb_link_node(&new->node, parent, p); | |
2196 | rb_insert_color(&new->node, &osdc->event_tree); | |
2197 | } | |
2198 | ||
2199 | static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc, | |
2200 | u64 cookie) | |
2201 | { | |
2202 | struct rb_node **p = &osdc->event_tree.rb_node; | |
2203 | struct rb_node *parent = NULL; | |
2204 | struct ceph_osd_event *event = NULL; | |
2205 | ||
2206 | while (*p) { | |
2207 | parent = *p; | |
2208 | event = rb_entry(parent, struct ceph_osd_event, node); | |
2209 | if (cookie < event->cookie) | |
2210 | p = &(*p)->rb_left; | |
2211 | else if (cookie > event->cookie) | |
2212 | p = &(*p)->rb_right; | |
2213 | else | |
2214 | return event; | |
2215 | } | |
2216 | return NULL; | |
2217 | } | |
2218 | ||
2219 | static void __remove_event(struct ceph_osd_event *event) | |
2220 | { | |
2221 | struct ceph_osd_client *osdc = event->osdc; | |
2222 | ||
2223 | if (!RB_EMPTY_NODE(&event->node)) { | |
2224 | dout("__remove_event removed %p\n", event); | |
2225 | rb_erase(&event->node, &osdc->event_tree); | |
2226 | ceph_osdc_put_event(event); | |
2227 | } else { | |
2228 | dout("__remove_event didn't remove %p\n", event); | |
2229 | } | |
2230 | } | |
2231 | ||
2232 | int ceph_osdc_create_event(struct ceph_osd_client *osdc, | |
2233 | void (*event_cb)(u64, u64, u8, void *), | |
3c663bbd | 2234 | void *data, struct ceph_osd_event **pevent) |
a40c4f10 YS |
2235 | { |
2236 | struct ceph_osd_event *event; | |
2237 | ||
2238 | event = kmalloc(sizeof(*event), GFP_NOIO); | |
2239 | if (!event) | |
2240 | return -ENOMEM; | |
2241 | ||
2242 | dout("create_event %p\n", event); | |
2243 | event->cb = event_cb; | |
3c663bbd | 2244 | event->one_shot = 0; |
a40c4f10 YS |
2245 | event->data = data; |
2246 | event->osdc = osdc; | |
2247 | INIT_LIST_HEAD(&event->osd_node); | |
3ee5234d | 2248 | RB_CLEAR_NODE(&event->node); |
a40c4f10 YS |
2249 | kref_init(&event->kref); /* one ref for us */ |
2250 | kref_get(&event->kref); /* one ref for the caller */ | |
a40c4f10 YS |
2251 | |
2252 | spin_lock(&osdc->event_lock); | |
2253 | event->cookie = ++osdc->event_count; | |
2254 | __insert_event(osdc, event); | |
2255 | spin_unlock(&osdc->event_lock); | |
2256 | ||
2257 | *pevent = event; | |
2258 | return 0; | |
2259 | } | |
2260 | EXPORT_SYMBOL(ceph_osdc_create_event); | |
2261 | ||
2262 | void ceph_osdc_cancel_event(struct ceph_osd_event *event) | |
2263 | { | |
2264 | struct ceph_osd_client *osdc = event->osdc; | |
2265 | ||
2266 | dout("cancel_event %p\n", event); | |
2267 | spin_lock(&osdc->event_lock); | |
2268 | __remove_event(event); | |
2269 | spin_unlock(&osdc->event_lock); | |
2270 | ceph_osdc_put_event(event); /* caller's */ | |
2271 | } | |
2272 | EXPORT_SYMBOL(ceph_osdc_cancel_event); | |
2273 | ||
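/*
 * Editor's sketch of how a consumer of the event API above might use it
 * (illustrative only; "my_osdc" is assumed to be an initialized
 * ceph_osd_client, and actually carrying the cookie in a
 * CEPH_OSD_OP_WATCH request on an object is not shown).  The callback is
 * invoked from osdc->notify_wq via do_event_work() below.
 */
#include <linux/ceph/osd_client.h>

/* called from the notify workqueue for each received notification */
static void my_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	pr_info("notify: ver %llu notify_id %llu opcode %u\n",
		ver, notify_id, opcode);
}

static int my_register_watch(struct ceph_osd_client *my_osdc)
{
	struct ceph_osd_event *event;
	int ret;

	/* allocate an event and obtain a cookie to embed in a watch op */
	ret = ceph_osdc_create_event(my_osdc, my_watch_cb, NULL, &event);
	if (ret)
		return ret;

	/* ... issue the CEPH_OSD_OP_WATCH request using event->cookie ... */

	/* on teardown, drop the event and drain pending callbacks */
	ceph_osdc_cancel_event(event);
	ceph_osdc_flush_notifies(my_osdc);
	return 0;
}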
2274 | ||
2275 | static void do_event_work(struct work_struct *work) | |
2276 | { | |
2277 | struct ceph_osd_event_work *event_work = | |
2278 | container_of(work, struct ceph_osd_event_work, work); | |
2279 | struct ceph_osd_event *event = event_work->event; | |
2280 | u64 ver = event_work->ver; | |
2281 | u64 notify_id = event_work->notify_id; | |
2282 | u8 opcode = event_work->opcode; | |
2283 | ||
2284 | dout("do_event_work completing %p\n", event); | |
2285 | event->cb(ver, notify_id, opcode, event->data); | |
a40c4f10 YS |
2286 | dout("do_event_work completed %p\n", event); |
2287 | ceph_osdc_put_event(event); | |
2288 | kfree(event_work); | |
2289 | } | |
2290 | ||
2291 | ||
2292 | /* | |
2293 | * Process osd watch notifications | |
2294 | */ | |
3c663bbd AE |
2295 | static void handle_watch_notify(struct ceph_osd_client *osdc, |
2296 | struct ceph_msg *msg) | |
a40c4f10 YS |
2297 | { |
2298 | void *p, *end; | |
2299 | u8 proto_ver; | |
2300 | u64 cookie, ver, notify_id; | |
2301 | u8 opcode; | |
2302 | struct ceph_osd_event *event; | |
2303 | struct ceph_osd_event_work *event_work; | |
2304 | ||
2305 | p = msg->front.iov_base; | |
2306 | end = p + msg->front.iov_len; | |
2307 | ||
2308 | ceph_decode_8_safe(&p, end, proto_ver, bad); | |
2309 | ceph_decode_8_safe(&p, end, opcode, bad); | |
2310 | ceph_decode_64_safe(&p, end, cookie, bad); | |
2311 | ceph_decode_64_safe(&p, end, ver, bad); | |
2312 | ceph_decode_64_safe(&p, end, notify_id, bad); | |
2313 | ||
2314 | spin_lock(&osdc->event_lock); | |
2315 | event = __find_event(osdc, cookie); | |
2316 | if (event) { | |
3c663bbd | 2317 | BUG_ON(event->one_shot); |
a40c4f10 | 2318 | get_event(event); |
a40c4f10 YS |
2319 | } |
2320 | spin_unlock(&osdc->event_lock); | |
2321 | dout("handle_watch_notify cookie %lld ver %lld event %p\n", | |
2322 | cookie, ver, event); | |
2323 | if (event) { | |
2324 | event_work = kmalloc(sizeof(*event_work), GFP_NOIO); | |
a40c4f10 YS |
2325 | if (!event_work) { |
2326 | dout("ERROR: could not allocate event_work\n"); | |
2327 | goto done_err; | |
2328 | } | |
6b0ae409 | 2329 | INIT_WORK(&event_work->work, do_event_work); |
a40c4f10 YS |
2330 | event_work->event = event; |
2331 | event_work->ver = ver; | |
2332 | event_work->notify_id = notify_id; | |
2333 | event_work->opcode = opcode; | |
2334 | if (!queue_work(osdc->notify_wq, &event_work->work)) { | |
2335 | dout("WARNING: failed to queue notify event work\n"); | |
2336 | goto done_err; | |
2337 | } | |
2338 | } | |
2339 | ||
2340 | return; | |
2341 | ||
2342 | done_err: | |
a40c4f10 YS |
2343 | ceph_osdc_put_event(event); |
2344 | return; | |
2345 | ||
2346 | bad: | |
2347 | pr_err("osdc handle_watch_notify corrupt msg\n"); | |
a40c4f10 YS |
2348 | } |
2349 | ||
e65550fd AE |
2350 | /* |
2351 | * build new request AND message | |
2352 | * | |
2353 | */ | |
2354 | void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, | |
2355 | struct ceph_snap_context *snapc, u64 snap_id, | |
2356 | struct timespec *mtime) | |
2357 | { | |
2358 | struct ceph_msg *msg = req->r_request; | |
2359 | void *p; | |
2360 | size_t msg_size; | |
2361 | int flags = req->r_flags; | |
2362 | u64 data_len; | |
2363 | unsigned int i; | |
2364 | ||
2365 | req->r_snapid = snap_id; | |
2366 | req->r_snapc = ceph_get_snap_context(snapc); | |
2367 | ||
2368 | /* encode request */ | |
2369 | msg->hdr.version = cpu_to_le16(4); | |
2370 | ||
2371 | p = msg->front.iov_base; | |
2372 | ceph_encode_32(&p, 1); /* client_inc is always 1 */ | |
2373 | req->r_request_osdmap_epoch = p; | |
2374 | p += 4; | |
2375 | req->r_request_flags = p; | |
2376 | p += 4; | |
2377 | if (req->r_flags & CEPH_OSD_FLAG_WRITE) | |
2378 | ceph_encode_timespec(p, mtime); | |
2379 | p += sizeof(struct ceph_timespec); | |
2380 | req->r_request_reassert_version = p; | |
2381 | p += sizeof(struct ceph_eversion); /* will get filled in */ | |
2382 | ||
2383 | /* oloc */ | |
2384 | ceph_encode_8(&p, 4); | |
2385 | ceph_encode_8(&p, 4); | |
2386 | ceph_encode_32(&p, 8 + 4 + 4); | |
2387 | req->r_request_pool = p; | |
2388 | p += 8; | |
2389 | ceph_encode_32(&p, -1); /* preferred */ | |
2390 | ceph_encode_32(&p, 0); /* key len */ | |
2391 | ||
2392 | ceph_encode_8(&p, 1); | |
2393 | req->r_request_pgid = p; | |
2394 | p += 8 + 4; | |
2395 | ceph_encode_32(&p, -1); /* preferred */ | |
2396 | ||
2397 | /* oid */ | |
3c972c95 ID |
2398 | ceph_encode_32(&p, req->r_base_oid.name_len); |
2399 | memcpy(p, req->r_base_oid.name, req->r_base_oid.name_len); | |
2400 | dout("oid '%.*s' len %d\n", req->r_base_oid.name_len, | |
2401 | req->r_base_oid.name, req->r_base_oid.name_len); | |
2402 | p += req->r_base_oid.name_len; | |
e65550fd AE |
2403 | |
2404 | /* ops--can imply data */ | |
2405 | ceph_encode_16(&p, (u16)req->r_num_ops); | |
2406 | data_len = 0; | |
2407 | for (i = 0; i < req->r_num_ops; i++) { | |
2408 | data_len += osd_req_encode_op(req, p, i); | |
2409 | p += sizeof(struct ceph_osd_op); | |
2410 | } | |
2411 | ||
2412 | /* snaps */ | |
2413 | ceph_encode_64(&p, req->r_snapid); | |
2414 | ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0); | |
2415 | ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); | |
2416 | if (req->r_snapc) { | |
2417 | for (i = 0; i < snapc->num_snaps; i++) { | |
2418 | ceph_encode_64(&p, req->r_snapc->snaps[i]); | |
2419 | } | |
2420 | } | |
2421 | ||
2422 | req->r_request_attempts = p; | |
2423 | p += 4; | |
2424 | ||
2425 | /* data */ | |
2426 | if (flags & CEPH_OSD_FLAG_WRITE) { | |
2427 | u16 data_off; | |
2428 | ||
2429 | /* | |
2430 | * The header "data_off" is a hint to the receiver | |
2431 | * allowing it to align received data into its | |
2432 | * buffers such that there's no need to re-copy | |
2433 | * it before writing it to disk (direct I/O). | |
2434 | */ | |
2435 | data_off = (u16) (off & 0xffff); | |
2436 | req->r_request->hdr.data_off = cpu_to_le16(data_off); | |
2437 | } | |
2438 | req->r_request->hdr.data_len = cpu_to_le32(data_len); | |
2439 | ||
2440 | BUG_ON(p > msg->front.iov_base + msg->front.iov_len); | |
2441 | msg_size = p - msg->front.iov_base; | |
2442 | msg->front.iov_len = msg_size; | |
2443 | msg->hdr.front_len = cpu_to_le32(msg_size); | |
2444 | ||
2445 | dout("build_request msg_size was %d\n", (int)msg_size); | |
2446 | } | |
2447 | EXPORT_SYMBOL(ceph_osdc_build_request); | |
2448 | ||
70636773 AE |
2449 | /* |
2450 | * Register request, send initial attempt. | |
2451 | */ | |
2452 | int ceph_osdc_start_request(struct ceph_osd_client *osdc, | |
2453 | struct ceph_osd_request *req, | |
2454 | bool nofail) | |
2455 | { | |
0bbfdfe8 | 2456 | int rc; |
70636773 | 2457 | |
f24e9980 SW |
2458 | down_read(&osdc->map_sem); |
2459 | mutex_lock(&osdc->request_mutex); | |
0bbfdfe8 ID |
2460 | |
2461 | rc = __ceph_osdc_start_request(osdc, req, nofail); | |
2462 | ||
f24e9980 SW |
2463 | mutex_unlock(&osdc->request_mutex); |
2464 | up_read(&osdc->map_sem); | |
0bbfdfe8 | 2465 | |
f24e9980 SW |
2466 | return rc; |
2467 | } | |
3d14c5d2 | 2468 | EXPORT_SYMBOL(ceph_osdc_start_request); |
f24e9980 | 2469 | |
c9f9b93d ID |
2470 | /* |
2471 | * Unregister a registered request. The request is not completed (i.e. | |
2472 | * no callbacks or wakeups) - higher layers are supposed to know what | |
2473 | * they are canceling. | |
2474 | */ | |
2475 | void ceph_osdc_cancel_request(struct ceph_osd_request *req) | |
2476 | { | |
2477 | struct ceph_osd_client *osdc = req->r_osdc; | |
2478 | ||
2479 | mutex_lock(&osdc->request_mutex); | |
2480 | if (req->r_linger) | |
2481 | __unregister_linger_request(osdc, req); | |
2482 | __unregister_request(osdc, req); | |
2483 | mutex_unlock(&osdc->request_mutex); | |
2484 | ||
2485 | dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid); | |
2486 | } | |
2487 | EXPORT_SYMBOL(ceph_osdc_cancel_request); | |
2488 | ||
f24e9980 SW |
2489 | /* |
2490 | * wait for a request to complete | |
2491 | */ | |
2492 | int ceph_osdc_wait_request(struct ceph_osd_client *osdc, | |
2493 | struct ceph_osd_request *req) | |
2494 | { | |
2495 | int rc; | |
2496 | ||
c9f9b93d ID |
2497 | dout("%s %p tid %llu\n", __func__, req, req->r_tid); |
2498 | ||
f24e9980 SW |
2499 | rc = wait_for_completion_interruptible(&req->r_completion); |
2500 | if (rc < 0) { | |
c9f9b93d ID |
2501 | dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid); |
2502 | ceph_osdc_cancel_request(req); | |
25845472 | 2503 | complete_request(req); |
f24e9980 SW |
2504 | return rc; |
2505 | } | |
2506 | ||
c9f9b93d ID |
2507 | dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid, |
2508 | req->r_result); | |
f24e9980 SW |
2509 | return req->r_result; |
2510 | } | |
3d14c5d2 | 2511 | EXPORT_SYMBOL(ceph_osdc_wait_request); |
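/*
 * Editor's sketch of the usual request lifecycle, tying together
 * ceph_osdc_build_request(), ceph_osdc_start_request() and
 * ceph_osdc_wait_request() (modeled on ceph_osdc_readpages() below;
 * layout/vino/offsets are assumed to come from the caller, and
 * truncate_seq/truncate_size are simply passed as 0 here).
 */
#include <linux/ceph/osd_client.h>

static int my_read_object(struct ceph_osd_client *osdc,
			  struct ceph_file_layout *layout,
			  struct ceph_vino vino,
			  u64 off, u64 *plen,
			  struct page **pages, int page_align)
{
	struct ceph_osd_request *req;
	int ret;

	/* 1. allocate a request mapping the file extent onto one object */
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, 0, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* 2. attach the data buffer and encode the request message */
	osd_req_op_extent_osd_data_pages(req, 0, pages, *plen, page_align,
					 false, false);
	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);

	/* 3. send and wait; an interrupted wait cancels the request */
	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);

	/* 4. drop our reference */
	ceph_osdc_put_request(req);
	return ret;
}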
f24e9980 SW |
2512 | |
2513 | /* | |
2514 | * sync - wait for all in-flight requests to flush. avoid starvation. | |
2515 | */ | |
2516 | void ceph_osdc_sync(struct ceph_osd_client *osdc) | |
2517 | { | |
2518 | struct ceph_osd_request *req; | |
2519 | u64 last_tid, next_tid = 0; | |
2520 | ||
2521 | mutex_lock(&osdc->request_mutex); | |
2522 | last_tid = osdc->last_tid; | |
2523 | while (1) { | |
2524 | req = __lookup_request_ge(osdc, next_tid); | |
2525 | if (!req) | |
2526 | break; | |
2527 | if (req->r_tid > last_tid) | |
2528 | break; | |
2529 | ||
2530 | next_tid = req->r_tid + 1; | |
2531 | if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) | |
2532 | continue; | |
2533 | ||
2534 | ceph_osdc_get_request(req); | |
2535 | mutex_unlock(&osdc->request_mutex); | |
2536 | dout("sync waiting on tid %llu (last is %llu)\n", | |
2537 | req->r_tid, last_tid); | |
2538 | wait_for_completion(&req->r_safe_completion); | |
2539 | mutex_lock(&osdc->request_mutex); | |
2540 | ceph_osdc_put_request(req); | |
2541 | } | |
2542 | mutex_unlock(&osdc->request_mutex); | |
2543 | dout("sync done (thru tid %llu)\n", last_tid); | |
2544 | } | |
3d14c5d2 | 2545 | EXPORT_SYMBOL(ceph_osdc_sync); |
f24e9980 | 2546 | |
dd935f44 JD |
2547 | /* |
2548 | * Call all pending notify callbacks - for use after a watch is | |
2549 | * unregistered, to make sure no more callbacks for it will be invoked | |
2550 | */ | |
f6479449 | 2551 | void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) |
dd935f44 JD |
2552 | { |
2553 | flush_workqueue(osdc->notify_wq); | |
2554 | } | |
2555 | EXPORT_SYMBOL(ceph_osdc_flush_notifies); | |
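/*
 * Editor's sketch: how ceph_osdc_sync() and ceph_osdc_flush_notifies()
 * are typically combined to quiesce an osd_client before teardown
 * (illustrative only; "my_osdc" is assumed to be a live client).
 */
static void my_quiesce(struct ceph_osd_client *my_osdc)
{
	/* wait for every in-flight write to be acknowledged as on-disk */
	ceph_osdc_sync(my_osdc);

	/* drain any watch/notify callbacks still queued on notify_wq */
	ceph_osdc_flush_notifies(my_osdc);
}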
2556 | ||
2557 | ||
f24e9980 SW |
2558 | /* |
2559 | * init, shutdown | |
2560 | */ | |
2561 | int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) | |
2562 | { | |
2563 | int err; | |
2564 | ||
2565 | dout("init\n"); | |
2566 | osdc->client = client; | |
2567 | osdc->osdmap = NULL; | |
2568 | init_rwsem(&osdc->map_sem); | |
2569 | init_completion(&osdc->map_waiters); | |
2570 | osdc->last_requested_map = 0; | |
2571 | mutex_init(&osdc->request_mutex); | |
f24e9980 SW |
2572 | osdc->last_tid = 0; |
2573 | osdc->osds = RB_ROOT; | |
f5a2041b | 2574 | INIT_LIST_HEAD(&osdc->osd_lru); |
f24e9980 | 2575 | osdc->requests = RB_ROOT; |
422d2cb8 | 2576 | INIT_LIST_HEAD(&osdc->req_lru); |
6f6c7006 SW |
2577 | INIT_LIST_HEAD(&osdc->req_unsent); |
2578 | INIT_LIST_HEAD(&osdc->req_notarget); | |
a40c4f10 | 2579 | INIT_LIST_HEAD(&osdc->req_linger); |
f24e9980 SW |
2580 | osdc->num_requests = 0; |
2581 | INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); | |
f5a2041b | 2582 | INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); |
a40c4f10 YS |
2583 | spin_lock_init(&osdc->event_lock); |
2584 | osdc->event_tree = RB_ROOT; | |
2585 | osdc->event_count = 0; | |
f5a2041b YS |
2586 | |
2587 | schedule_delayed_work(&osdc->osds_timeout_work, | |
3d14c5d2 | 2588 | round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); |
f24e9980 | 2589 | |
5f44f142 | 2590 | err = -ENOMEM; |
f24e9980 SW |
2591 | osdc->req_mempool = mempool_create_kmalloc_pool(10, |
2592 | sizeof(struct ceph_osd_request)); | |
2593 | if (!osdc->req_mempool) | |
5f44f142 | 2594 | goto out; |
f24e9980 | 2595 | |
d50b409f SW |
2596 | err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, |
2597 | OSD_OP_FRONT_LEN, 10, true, | |
4f48280e | 2598 | "osd_op"); |
f24e9980 | 2599 | if (err < 0) |
5f44f142 | 2600 | goto out_mempool; |
d50b409f | 2601 | err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, |
4f48280e SW |
2602 | OSD_OPREPLY_FRONT_LEN, 10, true, |
2603 | "osd_op_reply"); | |
c16e7869 SW |
2604 | if (err < 0) |
2605 | goto out_msgpool; | |
a40c4f10 | 2606 | |
dbcae088 | 2607 | err = -ENOMEM; |
a40c4f10 | 2608 | osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); |
dbcae088 | 2609 | if (!osdc->notify_wq) |
c172ec5c ID |
2610 | goto out_msgpool_reply; |
2611 | ||
f24e9980 | 2612 | return 0; |
5f44f142 | 2613 | |
c172ec5c ID |
2614 | out_msgpool_reply: |
2615 | ceph_msgpool_destroy(&osdc->msgpool_op_reply); | |
c16e7869 SW |
2616 | out_msgpool: |
2617 | ceph_msgpool_destroy(&osdc->msgpool_op); | |
5f44f142 SW |
2618 | out_mempool: |
2619 | mempool_destroy(osdc->req_mempool); | |
2620 | out: | |
2621 | return err; | |
f24e9980 SW |
2622 | } |
2623 | ||
2624 | void ceph_osdc_stop(struct ceph_osd_client *osdc) | |
2625 | { | |
a40c4f10 YS |
2626 | flush_workqueue(osdc->notify_wq); |
2627 | destroy_workqueue(osdc->notify_wq); | |
f24e9980 | 2628 | cancel_delayed_work_sync(&osdc->timeout_work); |
f5a2041b | 2629 | cancel_delayed_work_sync(&osdc->osds_timeout_work); |
f24e9980 SW |
2630 | if (osdc->osdmap) { |
2631 | ceph_osdmap_destroy(osdc->osdmap); | |
2632 | osdc->osdmap = NULL; | |
2633 | } | |
aca420bc | 2634 | remove_all_osds(osdc); |
f24e9980 SW |
2635 | mempool_destroy(osdc->req_mempool); |
2636 | ceph_msgpool_destroy(&osdc->msgpool_op); | |
c16e7869 | 2637 | ceph_msgpool_destroy(&osdc->msgpool_op_reply); |
f24e9980 SW |
2638 | } |
2639 | ||
2640 | /* | |
2641 | * Read some contiguous pages. If we cross a stripe boundary, shorten | |
2642 | * *plen. Return number of bytes read, or error. | |
2643 | */ | |
2644 | int ceph_osdc_readpages(struct ceph_osd_client *osdc, | |
2645 | struct ceph_vino vino, struct ceph_file_layout *layout, | |
2646 | u64 off, u64 *plen, | |
2647 | u32 truncate_seq, u64 truncate_size, | |
b7495fc2 | 2648 | struct page **pages, int num_pages, int page_align) |
f24e9980 SW |
2649 | { |
2650 | struct ceph_osd_request *req; | |
2651 | int rc = 0; | |
2652 | ||
2653 | dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, | |
2654 | vino.snap, off, *plen); | |
79528734 | 2655 | req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, |
f24e9980 | 2656 | CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, |
acead002 | 2657 | NULL, truncate_seq, truncate_size, |
153e5167 | 2658 | false); |
6816282d SW |
2659 | if (IS_ERR(req)) |
2660 | return PTR_ERR(req); | |
f24e9980 SW |
2661 | |
2662 | /* it may be a short read due to an object boundary */ | |
0fff87ec | 2663 | |
406e2c9f | 2664 | osd_req_op_extent_osd_data_pages(req, 0, |
a4ce40a9 | 2665 | pages, *plen, page_align, false, false); |
f24e9980 | 2666 | |
e0c59487 | 2667 | dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", |
43bfe5de | 2668 | off, *plen, *plen, page_align); |
f24e9980 | 2669 | |
79528734 | 2670 | ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); |
02ee07d3 | 2671 | |
f24e9980 SW |
2672 | rc = ceph_osdc_start_request(osdc, req, false); |
2673 | if (!rc) | |
2674 | rc = ceph_osdc_wait_request(osdc, req); | |
2675 | ||
2676 | ceph_osdc_put_request(req); | |
2677 | dout("readpages result %d\n", rc); | |
2678 | return rc; | |
2679 | } | |
3d14c5d2 | 2680 | EXPORT_SYMBOL(ceph_osdc_readpages); |
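/*
 * Editor's usage note: the caller of ceph_osdc_readpages() owns the page
 * vector.  A minimal sketch, assuming the libceph page-vector helpers
 * ceph_alloc_page_vector()/ceph_release_page_vector() and calc_pages_for(),
 * and a caller-supplied layout/vino (truncate_seq/truncate_size are passed
 * as 0 for simplicity):
 */
#include <linux/ceph/libceph.h>

static int my_readpages(struct ceph_osd_client *osdc,
			struct ceph_file_layout *layout,
			struct ceph_vino vino, u64 off, u64 len)
{
	u64 plen = len;
	int page_align = off & ~PAGE_MASK;
	int num_pages = calc_pages_for(off, len);
	struct page **pages;
	int ret;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* may be a short read if the extent crosses an object boundary;
	 * plen is updated to the length actually mapped */
	ret = ceph_osdc_readpages(osdc, vino, layout, off, &plen,
				  0, 0, pages, num_pages, page_align);

	/* ... copy out or hand off the data before releasing ... */
	ceph_release_page_vector(pages, num_pages);
	return ret;
}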
f24e9980 SW |
2681 | |
2682 | /* | |
2683 | * do a synchronous write on N pages | |
2684 | */ | |
2685 | int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, | |
2686 | struct ceph_file_layout *layout, | |
2687 | struct ceph_snap_context *snapc, | |
2688 | u64 off, u64 len, | |
2689 | u32 truncate_seq, u64 truncate_size, | |
2690 | struct timespec *mtime, | |
24808826 | 2691 | struct page **pages, int num_pages) |
f24e9980 SW |
2692 | { |
2693 | struct ceph_osd_request *req; | |
2694 | int rc = 0; | |
b7495fc2 | 2695 | int page_align = off & ~PAGE_MASK; |
f24e9980 | 2696 | |
acead002 | 2697 | BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */ |
79528734 | 2698 | req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, |
f24e9980 | 2699 | CEPH_OSD_OP_WRITE, |
24808826 | 2700 | CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, |
acead002 | 2701 | snapc, truncate_seq, truncate_size, |
153e5167 | 2702 | true); |
6816282d SW |
2703 | if (IS_ERR(req)) |
2704 | return PTR_ERR(req); | |
f24e9980 SW |
2705 | |
2706 | /* it may be a short write due to an object boundary */ | |
406e2c9f | 2707 | osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, |
43bfe5de AE |
2708 | false, false); |
2709 | dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); | |
f24e9980 | 2710 | |
79528734 | 2711 | ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime); |
02ee07d3 | 2712 | |
87f979d3 | 2713 | rc = ceph_osdc_start_request(osdc, req, true); |
f24e9980 SW |
2714 | if (!rc) |
2715 | rc = ceph_osdc_wait_request(osdc, req); | |
2716 | ||
2717 | ceph_osdc_put_request(req); | |
2718 | if (rc == 0) | |
2719 | rc = len; | |
2720 | dout("writepages result %d\n", rc); | |
2721 | return rc; | |
2722 | } | |
3d14c5d2 | 2723 | EXPORT_SYMBOL(ceph_osdc_writepages); |
f24e9980 | 2724 | |
5522ae0b AE |
2725 | int ceph_osdc_setup(void) |
2726 | { | |
2727 | BUG_ON(ceph_osd_request_cache); | |
2728 | ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", | |
2729 | sizeof (struct ceph_osd_request), | |
2730 | __alignof__(struct ceph_osd_request), | |
2731 | 0, NULL); | |
2732 | ||
2733 | return ceph_osd_request_cache ? 0 : -ENOMEM; | |
2734 | } | |
2735 | EXPORT_SYMBOL(ceph_osdc_setup); | |
2736 | ||
2737 | void ceph_osdc_cleanup(void) | |
2738 | { | |
2739 | BUG_ON(!ceph_osd_request_cache); | |
2740 | kmem_cache_destroy(ceph_osd_request_cache); | |
2741 | ceph_osd_request_cache = NULL; | |
2742 | } | |
2743 | EXPORT_SYMBOL(ceph_osdc_cleanup); | |
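/*
 * Editor's sketch: ceph_osdc_setup()/ceph_osdc_cleanup() manage the
 * shared ceph_osd_request slab cache and are meant to be called exactly
 * once per module load/unload (in the real tree this happens from
 * libceph's module init/exit).  A hypothetical module skeleton:
 */
#include <linux/module.h>
#include <linux/ceph/osd_client.h>

static int __init my_module_init(void)
{
	int ret;

	/* create the shared ceph_osd_request cache */
	ret = ceph_osdc_setup();
	if (ret)
		return ret;

	/* ... remaining module initialization ... */
	return 0;
}

static void __exit my_module_exit(void)
{
	/* destroy the cache; no requests may still be outstanding */
	ceph_osdc_cleanup();
}

module_init(my_module_init);
module_exit(my_module_exit);
MODULE_LICENSE("GPL");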
2744 | ||
f24e9980 SW |
2745 | /* |
2746 | * handle incoming message | |
2747 | */ | |
2748 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
2749 | { | |
2750 | struct ceph_osd *osd = con->private; | |
32c895e7 | 2751 | struct ceph_osd_client *osdc; |
f24e9980 SW |
2752 | int type = le16_to_cpu(msg->hdr.type); |
2753 | ||
2754 | if (!osd) | |
4a32f93d | 2755 | goto out; |
32c895e7 | 2756 | osdc = osd->o_osdc; |
f24e9980 SW |
2757 | |
2758 | switch (type) { | |
2759 | case CEPH_MSG_OSD_MAP: | |
2760 | ceph_osdc_handle_map(osdc, msg); | |
2761 | break; | |
2762 | case CEPH_MSG_OSD_OPREPLY: | |
350b1c32 | 2763 | handle_reply(osdc, msg, con); |
f24e9980 | 2764 | break; |
a40c4f10 YS |
2765 | case CEPH_MSG_WATCH_NOTIFY: |
2766 | handle_watch_notify(osdc, msg); | |
2767 | break; | |
f24e9980 SW |
2768 | |
2769 | default: | |
2770 | pr_err("received unknown message type %d %s\n", type, | |
2771 | ceph_msg_type_name(type)); | |
2772 | } | |
4a32f93d | 2773 | out: |
f24e9980 SW |
2774 | ceph_msg_put(msg); |
2775 | } | |
2776 | ||
5b3a4db3 | 2777 | /* |
21b667f6 SW |
2778 | * lookup and return message for incoming reply. set up reply message |
2779 | * pages. | |
5b3a4db3 SW |
2780 | */ |
2781 | static struct ceph_msg *get_reply(struct ceph_connection *con, | |
2450418c YS |
2782 | struct ceph_msg_header *hdr, |
2783 | int *skip) | |
f24e9980 SW |
2784 | { |
2785 | struct ceph_osd *osd = con->private; | |
2786 | struct ceph_osd_client *osdc = osd->o_osdc; | |
2450418c | 2787 | struct ceph_msg *m; |
0547a9b3 | 2788 | struct ceph_osd_request *req; |
3f0a4ac5 | 2789 | int front_len = le32_to_cpu(hdr->front_len); |
5b3a4db3 | 2790 | int data_len = le32_to_cpu(hdr->data_len); |
0547a9b3 | 2791 | u64 tid; |
f24e9980 | 2792 | |
0547a9b3 YS |
2793 | tid = le64_to_cpu(hdr->tid); |
2794 | mutex_lock(&osdc->request_mutex); | |
2795 | req = __lookup_request(osdc, tid); | |
2796 | if (!req) { | |
2797 | *skip = 1; | |
2798 | m = NULL; | |
756a16a5 SW |
2799 | dout("get_reply unknown tid %llu from osd%d\n", tid, |
2800 | osd->o_osd); | |
0547a9b3 YS |
2801 | goto out; |
2802 | } | |
c16e7869 | 2803 | |
ace6d3a9 | 2804 | if (req->r_reply->con) |
8921d114 | 2805 | dout("%s revoking msg %p from old con %p\n", __func__, |
ace6d3a9 AE |
2806 | req->r_reply, req->r_reply->con); |
2807 | ceph_msg_revoke_incoming(req->r_reply); | |
0547a9b3 | 2808 | |
f2be82b0 | 2809 | if (front_len > req->r_reply->front_alloc_len) { |
4974341e | 2810 | pr_warning("get_reply front %d > preallocated %d (%u#%llu)\n", |
f2be82b0 | 2811 | front_len, req->r_reply->front_alloc_len, |
4974341e AE |
2812 | (unsigned int)con->peer_name.type, |
2813 | le64_to_cpu(con->peer_name.num)); | |
3f0a4ac5 ID |
2814 | m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS, |
2815 | false); | |
a79832f2 | 2816 | if (!m) |
c16e7869 SW |
2817 | goto out; |
2818 | ceph_msg_put(req->r_reply); | |
2819 | req->r_reply = m; | |
2820 | } | |
2821 | m = ceph_msg_get(req->r_reply); | |
2822 | ||
0547a9b3 | 2823 | if (data_len > 0) { |
a4ce40a9 | 2824 | struct ceph_osd_data *osd_data; |
0fff87ec | 2825 | |
a4ce40a9 AE |
2826 | /* |
2827 | * XXX This is assuming there is only one op containing | |
2828 | * XXX page data. Probably OK for reads, but this | |
2829 | * XXX ought to be done more generally. | |
2830 | */ | |
406e2c9f | 2831 | osd_data = osd_req_op_extent_osd_data(req, 0); |
0fff87ec | 2832 | if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { |
0fff87ec | 2833 | if (osd_data->pages && |
e0c59487 | 2834 | unlikely(osd_data->length < data_len)) { |
2ac2b7a6 | 2835 | |
e0c59487 AE |
2836 | pr_warning("tid %lld reply has %d bytes " |
2837 | "we had only %llu bytes ready\n", | |
2838 | tid, data_len, osd_data->length); | |
2ac2b7a6 AE |
2839 | *skip = 1; |
2840 | ceph_msg_put(m); | |
2841 | m = NULL; | |
2842 | goto out; | |
2843 | } | |
2ac2b7a6 | 2844 | } |
0547a9b3 | 2845 | } |
5b3a4db3 | 2846 | *skip = 0; |
c16e7869 | 2847 | dout("get_reply tid %lld %p\n", tid, m); |
0547a9b3 YS |
2848 | |
2849 | out: | |
2850 | mutex_unlock(&osdc->request_mutex); | |
2450418c | 2851 | return m; |
5b3a4db3 SW |
2852 | |
2853 | } | |
2854 | ||
2855 | static struct ceph_msg *alloc_msg(struct ceph_connection *con, | |
2856 | struct ceph_msg_header *hdr, | |
2857 | int *skip) | |
2858 | { | |
2859 | struct ceph_osd *osd = con->private; | |
2860 | int type = le16_to_cpu(hdr->type); | |
2861 | int front = le32_to_cpu(hdr->front_len); | |
2862 | ||
1c20f2d2 | 2863 | *skip = 0; |
5b3a4db3 SW |
2864 | switch (type) { |
2865 | case CEPH_MSG_OSD_MAP: | |
a40c4f10 | 2866 | case CEPH_MSG_WATCH_NOTIFY: |
b61c2763 | 2867 | return ceph_msg_new(type, front, GFP_NOFS, false); |
5b3a4db3 SW |
2868 | case CEPH_MSG_OSD_OPREPLY: |
2869 | return get_reply(con, hdr, skip); | |
2870 | default: | |
2871 | pr_info("alloc_msg unexpected msg type %d from osd%d\n", type, | |
2872 | osd->o_osd); | |
2873 | *skip = 1; | |
2874 | return NULL; | |
2875 | } | |
f24e9980 SW |
2876 | } |
2877 | ||
2878 | /* | |
2879 | * Wrappers to refcount containing ceph_osd struct | |
2880 | */ | |
2881 | static struct ceph_connection *get_osd_con(struct ceph_connection *con) | |
2882 | { | |
2883 | struct ceph_osd *osd = con->private; | |
2884 | if (get_osd(osd)) | |
2885 | return con; | |
2886 | return NULL; | |
2887 | } | |
2888 | ||
2889 | static void put_osd_con(struct ceph_connection *con) | |
2890 | { | |
2891 | struct ceph_osd *osd = con->private; | |
2892 | put_osd(osd); | |
2893 | } | |
2894 | ||
4e7a5dcd SW |
2895 | /* |
2896 | * authentication | |
2897 | */ | |
a3530df3 AE |
2898 | /* |
2899 | * Note: returned pointer is the address of a structure that's | |
2900 | * managed separately. Caller must *not* attempt to free it. | |
2901 | */ | |
2902 | static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |
8f43fb53 | 2903 | int *proto, int force_new) |
4e7a5dcd SW |
2904 | { |
2905 | struct ceph_osd *o = con->private; | |
2906 | struct ceph_osd_client *osdc = o->o_osdc; | |
2907 | struct ceph_auth_client *ac = osdc->client->monc.auth; | |
74f1869f | 2908 | struct ceph_auth_handshake *auth = &o->o_auth; |
4e7a5dcd | 2909 | |
74f1869f | 2910 | if (force_new && auth->authorizer) { |
27859f97 | 2911 | ceph_auth_destroy_authorizer(ac, auth->authorizer); |
74f1869f AE |
2912 | auth->authorizer = NULL; |
2913 | } | |
27859f97 SW |
2914 | if (!auth->authorizer) { |
2915 | int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, | |
2916 | auth); | |
4e7a5dcd | 2917 | if (ret) |
a3530df3 | 2918 | return ERR_PTR(ret); |
27859f97 SW |
2919 | } else { |
2920 | int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, | |
0bed9b5c SW |
2921 | auth); |
2922 | if (ret) | |
2923 | return ERR_PTR(ret); | |
4e7a5dcd | 2924 | } |
4e7a5dcd | 2925 | *proto = ac->protocol; |
74f1869f | 2926 | |
a3530df3 | 2927 | return auth; |
4e7a5dcd SW |
2928 | } |
2929 | ||
2930 | ||
2931 | static int verify_authorizer_reply(struct ceph_connection *con, int len) | |
2932 | { | |
2933 | struct ceph_osd *o = con->private; | |
2934 | struct ceph_osd_client *osdc = o->o_osdc; | |
2935 | struct ceph_auth_client *ac = osdc->client->monc.auth; | |
2936 | ||
27859f97 | 2937 | return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len); |
4e7a5dcd SW |
2938 | } |
2939 | ||
9bd2e6f8 SW |
2940 | static int invalidate_authorizer(struct ceph_connection *con) |
2941 | { | |
2942 | struct ceph_osd *o = con->private; | |
2943 | struct ceph_osd_client *osdc = o->o_osdc; | |
2944 | struct ceph_auth_client *ac = osdc->client->monc.auth; | |
2945 | ||
27859f97 | 2946 | ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); |
9bd2e6f8 SW |
2947 | return ceph_monc_validate_auth(&osdc->client->monc); |
2948 | } | |
4e7a5dcd | 2949 | |
9e32789f | 2950 | static const struct ceph_connection_operations osd_con_ops = { |
f24e9980 SW |
2951 | .get = get_osd_con, |
2952 | .put = put_osd_con, | |
2953 | .dispatch = dispatch, | |
4e7a5dcd SW |
2954 | .get_authorizer = get_authorizer, |
2955 | .verify_authorizer_reply = verify_authorizer_reply, | |
9bd2e6f8 | 2956 | .invalidate_authorizer = invalidate_authorizer, |
f24e9980 | 2957 | .alloc_msg = alloc_msg, |
81b024e7 | 2958 | .fault = osd_reset, |
f24e9980 | 2959 | }; |