/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl code shared between vvp and liblustre (and other Lustre clients in the
 * future).
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>

#include "../include/obd.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "../include/lustre_mdc.h"
#include "../include/cl_object.h"

#include "../include/lclient.h"

#include "../llite/llite_internal.h"

static const struct cl_req_operations ccc_req_ops;

/*
 * ccc_ prefix stands for "Common Client Code".
 */

static struct kmem_cache *ccc_lock_kmem;
static struct kmem_cache *ccc_object_kmem;
static struct kmem_cache *ccc_thread_kmem;
static struct kmem_cache *ccc_session_kmem;
static struct kmem_cache *ccc_req_kmem;

static struct lu_kmem_descr ccc_caches[] = {
	{
		.ckd_cache = &ccc_lock_kmem,
		.ckd_name  = "ccc_lock_kmem",
		.ckd_size  = sizeof(struct ccc_lock)
	},
	{
		.ckd_cache = &ccc_object_kmem,
		.ckd_name  = "ccc_object_kmem",
		.ckd_size  = sizeof(struct ccc_object)
	},
	{
		.ckd_cache = &ccc_thread_kmem,
		.ckd_name  = "ccc_thread_kmem",
		.ckd_size  = sizeof(struct ccc_thread_info)
	},
	{
		.ckd_cache = &ccc_session_kmem,
		.ckd_name  = "ccc_session_kmem",
		.ckd_size  = sizeof(struct ccc_session)
	},
	{
		.ckd_cache = &ccc_req_kmem,
		.ckd_name  = "ccc_req_kmem",
		.ckd_size  = sizeof(struct ccc_req)
	},
	{
		.ckd_cache = NULL
	}
};
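
/*
 * The table above is a NULL-terminated descriptor list. A minimal usage
 * sketch (this is what ccc_global_init()/ccc_global_fini() below do):
 *
 *	rc = lu_kmem_init(ccc_caches);
 *	...
 *	lu_kmem_fini(ccc_caches);
 *
 * Both helpers walk the table and stop at the sentinel entry whose
 * ->ckd_cache is NULL.
 */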

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
{
	struct ccc_thread_info *info;

	OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}

void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data)
{
	struct ccc_thread_info *info = data;

	OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
}

void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key)
{
	struct ccc_session *session;

	OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}

void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data)
{
	struct ccc_session *session = data;

	OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
}

struct lu_context_key ccc_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = ccc_key_init,
	.lct_fini = ccc_key_fini
};

struct lu_context_key ccc_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = ccc_session_key_init,
	.lct_fini = ccc_session_key_fini
};
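
/*
 * ccc_key provides per-thread scratch space (struct ccc_thread_info) and
 * ccc_session_key per-session state (struct ccc_session). A minimal sketch
 * of how such a value is looked up, assuming the standard lu_context_key
 * accessor (the ccc_env_info()/ccc_env_io() helpers used throughout this
 * file are presumably thin wrappers of this form):
 *
 *	struct ccc_thread_info *info;
 *
 *	info = lu_context_key_get(&env->le_ctx, &ccc_key);
 */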

/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */

int ccc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next)
{
	struct ccc_device *vdv;
	int rc;

	vdv = lu2ccc_dev(d);
	vdv->cdv_next = lu2cl_dev(next);

	LASSERT(d->ld_site != NULL && next->ld_type != NULL);
	next->ld_site = d->ld_site;
	rc = next->ld_type->ldt_ops->ldto_device_init(
			env, next, next->ld_type->ldt_name, NULL);
	if (rc == 0) {
		lu_device_get(next);
		lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
	}
	return rc;
}

struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d)
{
	return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
}

struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops)
{
	struct ccc_device *vdv;
	struct lu_device *lud;
	struct cl_site *site;
	int rc;

	OBD_ALLOC_PTR(vdv);
	if (vdv == NULL)
		return ERR_PTR(-ENOMEM);

	lud = &vdv->cdv_cl.cd_lu_dev;
	cl_device_init(&vdv->cdv_cl, t);
	ccc2lu_dev(vdv)->ld_ops = luops;
	vdv->cdv_cl.cd_ops = clops;

	OBD_ALLOC_PTR(site);
	if (site != NULL) {
		rc = cl_site_init(site, &vdv->cdv_cl);
		if (rc == 0) {
			rc = lu_site_init_finish(&site->cs_lu);
		} else {
			LASSERT(lud->ld_site == NULL);
			CERROR("Cannot init lu_site, rc %d.\n", rc);
			OBD_FREE_PTR(site);
		}
	} else {
		rc = -ENOMEM;
	}
	if (rc != 0) {
		ccc_device_free(env, lud);
		lud = ERR_PTR(rc);
	}
	return lud;
}

struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d)
{
	struct ccc_device *vdv = lu2ccc_dev(d);
	struct cl_site *site = lu2cl_site(d->ld_site);
	struct lu_device *next = cl2lu_dev(vdv->cdv_next);

	if (d->ld_site != NULL) {
		cl_site_fini(site);
		OBD_FREE_PTR(site);
	}
	cl_device_fini(lu2cl_dev(d));
	OBD_FREE_PTR(vdv);
	return next;
}

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req)
{
	struct ccc_req *vrq;
	int result;

	OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, GFP_NOFS);
	if (vrq != NULL) {
		cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
		result = 0;
	} else {
		result = -ENOMEM;
	}
	return result;
}

/**
 * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
 * fails. Access to this environment is serialized by the
 * ccc_inode_fini_guard mutex.
 */
static struct lu_env *ccc_inode_fini_env;

/**
 * A mutex serializing calls to slp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;

int ccc_global_init(struct lu_device_type *device_type)
{
	int result;

	result = lu_kmem_init(ccc_caches);
	if (result)
		return result;

	result = lu_device_type_init(device_type);
	if (result)
		goto out_kmem;

	ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
					  LCT_REMEMBER | LCT_NOREF);
	if (IS_ERR(ccc_inode_fini_env)) {
		result = PTR_ERR(ccc_inode_fini_env);
		goto out_device;
	}

	ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
	return 0;
out_device:
	lu_device_type_fini(device_type);
out_kmem:
	lu_kmem_fini(ccc_caches);
	return result;
}

void ccc_global_fini(struct lu_device_type *device_type)
{
	if (ccc_inode_fini_env != NULL) {
		cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
		ccc_inode_fini_env = NULL;
	}
	lu_device_type_fini(device_type);
	lu_kmem_fini(ccc_caches);
}

/*****************************************************************************
 *
 * Object operations.
 *
 */

struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *unused,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops)
{
	struct ccc_object *vob;
	struct lu_object *obj;

	OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, GFP_NOFS);
	if (vob != NULL) {
		struct cl_object_header *hdr;

		obj = ccc2lu(vob);
		hdr = &vob->cob_header;
		cl_object_header_init(hdr);
		lu_object_init(obj, &hdr->coh_lu, dev);
		lu_object_add_top(&hdr->coh_lu, obj);

		vob->cob_cl.co_ops = clops;
		obj->lo_ops = luops;
	} else {
		obj = NULL;
	}
	return obj;
}

int ccc_object_init0(const struct lu_env *env,
		     struct ccc_object *vob,
		     const struct cl_object_conf *conf)
{
	vob->cob_inode = conf->coc_inode;
	vob->cob_transient_pages = 0;
	cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
	return 0;
}

int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf)
{
	struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
	struct ccc_object *vob = lu2ccc(obj);
	struct lu_object *below;
	struct lu_device *under;
	int result;

	under = &dev->cdv_next->cd_lu_dev;
	below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
	if (below != NULL) {
		const struct cl_object_conf *cconf;

		cconf = lu2cl_conf(conf);
		INIT_LIST_HEAD(&vob->cob_pending_list);
		lu_object_add(obj, below);
		result = ccc_object_init0(env, vob, cconf);
	} else {
		result = -ENOMEM;
	}
	return result;
}

void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
{
	struct ccc_object *vob = lu2ccc(obj);

	lu_object_fini(obj);
	lu_object_header_fini(obj->lo_header);
	OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
}

int ccc_lock_init(const struct lu_env *env,
		  struct cl_object *obj, struct cl_lock *lock,
		  const struct cl_io *unused,
		  const struct cl_lock_operations *lkops)
{
	struct ccc_lock *clk;
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, GFP_NOFS);
	if (clk != NULL) {
		cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
		result = 0;
	} else {
		result = -ENOMEM;
	}
	return result;
}

int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_attr *attr, unsigned valid)
{
	return 0;
}

int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb)
{
	struct inode *inode = ccc_object_inode(obj);

	lvb->lvb_mtime = cl_inode_mtime(inode);
	lvb->lvb_atime = cl_inode_atime(inode);
	lvb->lvb_ctime = cl_inode_ctime(inode);
	/*
	 * LU-417: Add the dirty pages block count lest i_blocks report 0;
	 * otherwise "cp" or "tar" on a remote node may think this is a
	 * completely sparse file and skip it.
	 */
	if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
		lvb->lvb_blocks = dirty_cnt(inode);
	return 0;
}

int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_object_conf *conf)
{
	/* TODO: destroy all pages attached to this object. */
	return 0;
}

static void ccc_object_size_lock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	cl_isize_lock(inode);
	cl_object_attr_lock(obj);
}

static void ccc_object_size_unlock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	cl_object_attr_unlock(obj);
	cl_isize_unlock(inode);
}
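
/*
 * Note the ordering in the two helpers above: cl_isize_lock() is taken
 * before cl_object_attr_lock() and released after it. Every user of these
 * helpers therefore nests the attribute lock inside the inode size lock,
 * which is what keeps i_size and the cl_object attributes consistent (see
 * the locking comment at ccc_prep_size() below).
 */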

/*****************************************************************************
 *
 * Page operations.
 *
 */

struct page *ccc_page_vmpage(const struct lu_env *env,
			     const struct cl_page_slice *slice)
{
	return cl2vm_page(slice);
}

int ccc_page_is_under_lock(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *io)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
	struct cl_page *page = slice->cpl_page;
	int result;

	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
	    io->ci_type == CIT_FAULT) {
		if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED) {
			result = -EBUSY;
		} else {
			desc->cld_start = page->cp_index;
			desc->cld_end = page->cp_index;
			desc->cld_obj = page->cp_obj;
			desc->cld_mode = CLM_READ;
			result = cl_queue_match(&io->ci_lockset.cls_done,
						desc) ? -EBUSY : 0;
		}
	} else {
		result = 0;
	}
	return result;
}

int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}

void ccc_transient_page_verify(const struct cl_page *page)
{
}

int ccc_transient_page_own(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *unused,
			   int nonblock)
{
	ccc_transient_page_verify(slice->cpl_page);
	return 0;
}

void ccc_transient_page_assume(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_unassume(const struct lu_env *env,
				 const struct cl_page_slice *slice,
				 struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_disown(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_discard(const struct lu_env *env,
				const struct cl_page_slice *slice,
				struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	ccc_transient_page_verify(slice->cpl_page);

	/*
	 * A transient page is simply removed from the radix tree.
	 */
	cl_page_delete(env, page);
}

int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	/* A transient page should always be sent. */
	return 0;
}

/*****************************************************************************
 *
 * Lock operations.
 *
 */

void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
}

void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
	struct ccc_lock *clk = cl2ccc_lock(slice);

	OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
}

int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *unused, __u32 enqflags)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

/**
 * Implementation of the cl_lock_operations::clo_fits_into() method for the
 * ccc layer. This function is executed every time io finds an existing lock
 * in the lock cache while creating a new lock, and has to decide whether
 * the cached lock "fits" into io.
 *
 * \param slice lock to be checked
 * \param io    IO that wants a lock.
 *
 * \see lov_lock_fits_into().
 */
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io)
{
	const struct cl_lock *lock = slice->cls_lock;
	const struct cl_lock_descr *descr = &lock->cll_descr;
	const struct ccc_io *cio = ccc_env_io(env);
	int result;

	/*
	 * Work around a DLM peculiarity: it assumes that a glimpse
	 * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read
	 * lock when asked for an LCK_PW lock with the LDLM_FL_HAS_INTENT
	 * flag set. Make sure that glimpse doesn't get a CLM_WRITE
	 * top-lock, so that it doesn't enqueue CLM_WRITE sub-locks.
	 */
	if (cio->cui_glimpse)
		result = descr->cld_mode != CLM_WRITE;
	/*
	 * Also, don't match incomplete write locks for read, otherwise read
	 * would enqueue missing sub-locks in the write mode.
	 */
	else if (need->cld_mode != descr->cld_mode)
		result = lock->cll_state >= CLS_ENQUEUED;
	else
		result = 1;
	return result;
}
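
/*
 * Decision-table sketch for ccc_lock_fits_into() (derived from the branches
 * above, not an exhaustive specification): a glimpse io never reuses a
 * cached CLM_WRITE lock; a request for a different mode than the cached
 * lock only matches once the lock has reached CLS_ENQUEUED, i.e. all of its
 * sub-locks exist; a lock of the exactly matching mode always fits.
 */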

/**
 * Implements the cl_lock_operations::clo_state() method for the ccc layer,
 * invoked whenever the lock state changes. Transfers object attributes that
 * might have been updated as a result of lock acquisition into the inode.
 */
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state)
{
	struct cl_lock *lock = slice->cls_lock;

	/*
	 * Refresh inode attributes when the lock is moving into the CLS_HELD
	 * state, and only when this is a result of a real enqueue, rather
	 * than of finding the lock in the cache.
	 */
	if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
		struct cl_object *obj;
		struct inode *inode;

		obj = slice->cls_obj;
		inode = ccc_object_inode(obj);

		/*
		 * vmtruncate() sets the i_size under both a DLM lock and
		 * the ll_inode_size_lock(). If we don't get the
		 * ll_inode_size_lock() here we can match the DLM lock and
		 * reset i_size. generic_file_write() can then trust the
		 * stale i_size when doing appending writes and effectively
		 * cancel the result of the truncate. Getting the
		 * ll_inode_size_lock() after the enqueue maintains the DLM
		 * -> ll_inode_size_lock() acquiring order.
		 */
		if (lock->cll_descr.cld_start == 0 &&
		    lock->cll_descr.cld_end == CL_PAGE_EOF)
			cl_merge_lvb(env, inode);
	}
}

/*****************************************************************************
 *
 * io operations.
 *
 */

void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
}

int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
	struct cl_object *obj = io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

	memset(&cio->cui_link, 0, sizeof(cio->cui_link));

	if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
		descr->cld_mode = CLM_GROUP;
		descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
	} else {
		descr->cld_mode = mode;
	}
	descr->cld_obj = obj;
	descr->cld_start = start;
	descr->cld_end = end;
	descr->cld_enq_flags = enqflags;

	cl_io_lock_add(env, io, &cio->cui_link);
	return 0;
}

void ccc_io_update_iov(const struct lu_env *env,
		       struct ccc_io *cio, struct cl_io *io)
{
	size_t size = io->u.ci_rw.crw_count;

	if (!cl_is_normalio(env, io) || cio->cui_iter == NULL)
		return;

	iov_iter_truncate(cio->cui_iter, size);
}

int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end)
{
	struct cl_object *obj = io->ci_obj;

	return ccc_io_one_lock_index(env, io, enqflags, mode,
				     cl_index(obj, start), cl_index(obj, end));
}

void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	CLOBINVRNT(env, ios->cis_io->ci_obj,
		   ccc_object_invariant(ios->cis_io->ci_obj));
}

void ccc_io_advance(const struct lu_env *env,
		    const struct cl_io_slice *ios,
		    size_t nob)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = ios->cis_io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	if (!cl_is_normalio(env, io))
		return;

	iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
}

/**
 * Helper function that, if necessary, adjusts the file size
 * (inode->i_size) when the position at offset \a pos is accessed. The file
 * size can be arbitrarily stale on a Lustre client, but the client at
 * least knows KMS. If the accessed area is inside [0, KMS], set the file
 * size to KMS; otherwise glimpse the file size.
 *
 * Locking: cl_isize_lock is used to serialize changes to the inode size
 * and to protect consistency between the inode size and cl_object
 * attributes. cl_object_size_lock() protects consistency between cl_attr's
 * of the top-object and sub-objects.
 */
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	struct inode *inode = ccc_object_inode(obj);
	loff_t pos = start + count - 1;
	loff_t kms;
	int result;

	/*
	 * Consistency guarantees: the following possibilities exist for the
	 * relation between the region being accessed and the real file size
	 * at this moment:
	 *
	 * (A): the region is completely inside of the file;
	 *
	 * (B-x): x bytes of the region are inside of the file, the rest is
	 * outside;
	 *
	 * (C): the region is completely outside of the file.
	 *
	 * This classification is stable under the DLM lock already acquired
	 * by the caller, because to change the class, another client has to
	 * take a DLM lock conflicting with our lock. Also, any updates to
	 * ->i_size by other threads on this client are serialized by
	 * ll_inode_size_lock(). This guarantees that short reads are handled
	 * correctly in the face of concurrent writes and truncates.
	 */
	ccc_object_size_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	if (result == 0) {
		kms = attr->cat_kms;
		if (pos > kms) {
			/*
			 * A glimpse is necessary to determine whether we
			 * return a short read (B) or some zeroes at the end
			 * of the buffer (C).
			 */
			ccc_object_size_unlock(obj);
			result = cl_glimpse_lock(env, io, inode, obj, 0);
			if (result == 0 && exceed != NULL) {
				/*
				 * If the target page index exceeds the
				 * end-of-file page index, return directly.
				 * Do not expect the kernel to check such a
				 * case correctly; linux-2.6.18-128.1.1
				 * misses that check. --bug 17336
				 */
				loff_t size = cl_isize_read(inode);
				loff_t cur_index = start >> PAGE_CACHE_SHIFT;
				loff_t size_index = (size - 1) >>
						    PAGE_CACHE_SHIFT;

				if ((size == 0 && cur_index != 0) ||
				    size_index < cur_index)
					*exceed = 1;
			}
			return result;
		}
		/*
		 * The region is within kms and, hence, within the real file
		 * size (A). We need to increase i_size to cover the read
		 * region so that generic_file_read() will do its job, but
		 * that doesn't mean the kms size is _correct_; it is only
		 * the _minimum_ size. If someone does a stat they will get
		 * the correct size, which will always be >= the kms value
		 * here. b=11081
		 */
		if (cl_isize_read(inode) < kms) {
			cl_isize_write_nolock(inode, kms);
			CDEBUG(D_VFSTRACE,
			       DFID" updating i_size %llu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (__u64)cl_isize_read(inode));
		}
	}
	ccc_object_size_unlock(obj);
	return result;
}
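
/*
 * Worked example for ccc_prep_size() (hypothetical numbers): with
 * kms == 4096 and a read of count == 1024 at start == 8192, pos == 9215 is
 * beyond kms, so the real size must be glimpsed from the servers to tell a
 * short read (B) apart from a read entirely past EOF (C). The same read at
 * start == 0 lies entirely under kms (case A): i_size is merely raised to
 * kms so that the generic read path does not truncate the transfer.
 */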

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret)
{
	struct ccc_req *vrq;

	if (ioret > 0)
		cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);

	vrq = cl2ccc_req(slice);
	OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the ccc
 * layer. ccc is responsible for
 *
 * - o_[mac]time
 *
 * - o_mode
 *
 * - o_parent_seq
 *
 * - o_[ug]id
 *
 * - o_parent_oid
 *
 * - o_parent_ver
 *
 * - o_ioepoch,
 *
 * and capability.
 */
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *attr, u64 flags)
{
	struct inode *inode;
	struct obdo *oa;
	u32 valid_flags;

	oa = attr->cra_oa;
	inode = ccc_object_inode(obj);
	valid_flags = OBD_MD_FLTYPE;

	if ((flags & OBD_MD_FLOSSCAPA) != 0) {
		LASSERT(attr->cra_capa == NULL);
		attr->cra_capa = cl_capa_lookup(inode,
						slice->crs_req->crq_type);
	}

	if (slice->crs_req->crq_type == CRT_WRITE) {
		if (flags & OBD_MD_FLEPOCH) {
			oa->o_valid |= OBD_MD_FLEPOCH;
			oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
			valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
				       OBD_MD_FLUID | OBD_MD_FLGID;
		}
	}
	obdo_from_inode(oa, inode, valid_flags & flags);
	obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
	memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
	       JOBSTATS_JOBID_SIZE);
}

static const struct cl_req_operations ccc_req_ops = {
	.cro_attr_set   = ccc_req_attr_set,
	.cro_completion = ccc_req_completion
};

int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa)
{
	struct lu_env *env;
	struct cl_io *io;
	int result;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ccc_env_thread_io(env);
	io->ci_obj = cl_i2info(inode)->lli_clob;

	io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
	io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
	io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
	io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
	io->u.ci_setattr.sa_valid = attr->ia_valid;
	io->u.ci_setattr.sa_capa = capa;

again:
	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
		struct ccc_io *cio = ccc_env_io(env);

		if (attr->ia_valid & ATTR_FILE)
			/*
			 * Populate the file descriptor for ftruncate to
			 * honor the group lock - see LU-787.
			 */
			cio->cui_fd = cl_iattr2fd(inode, attr);

		result = cl_io_loop(env, io);
	} else {
		result = io->ci_result;
	}
	cl_io_fini(env, io);
	if (unlikely(io->ci_need_restart))
		goto again;
	/*
	 * HSM import case: the file is released and cannot be restored; no
	 * need to fail, except if restore registration failed with -ENODATA.
	 */
	if (result == -ENODATA && io->ci_restore_needed &&
	    io->ci_result != -ENODATA)
		result = 0;
	cl_env_put(env, &refcheck);
	return result;
}

/*****************************************************************************
 *
 * Type conversions.
 *
 */

struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
{
	return &vdv->cdv_cl.cd_lu_dev;
}

struct ccc_device *lu2ccc_dev(const struct lu_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
}

struct ccc_device *cl2ccc_dev(const struct cl_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl);
}

struct lu_object *ccc2lu(struct ccc_object *vob)
{
	return &vob->cob_cl.co_lu;
}

struct ccc_object *lu2ccc(const struct lu_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl.co_lu);
}

struct ccc_object *cl2ccc(const struct cl_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl);
}

struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
{
	return container_of(slice, struct ccc_lock, clk_cl);
}

struct ccc_io *cl2ccc_io(const struct lu_env *env,
			 const struct cl_io_slice *slice)
{
	struct ccc_io *cio;

	cio = container_of(slice, struct ccc_io, cui_cl);
	LASSERT(cio == ccc_env_io(env));
	return cio;
}

struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct ccc_req, crq_cl);
}

struct page *cl2vm_page(const struct cl_page_slice *slice)
{
	return cl2ccc_page(slice)->cpg_page;
}

/*****************************************************************************
 *
 * Accessors.
 *
 */

int ccc_object_invariant(const struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);
	struct cl_inode_info *lli = cl_i2info(inode);

	return (S_ISREG(cl_inode_mode(inode)) ||
		/* i_mode of an unlinked inode is zeroed. */
		cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
}

struct inode *ccc_object_inode(const struct cl_object *obj)
{
	return cl2ccc(obj)->cob_inode;
}

/**
 * Returns a pointer to the cl_page associated with \a vmpage, without
 * acquiring an additional reference to the resulting page. This is an
 * unsafe version of cl_vmpage_page() that can only be used under the
 * vmpage lock.
 */
struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
{
	KLASSERT(PageLocked(vmpage));
	return (struct cl_page *)vmpage->private;
}

/**
 * Initialize or update CLIO structures for regular files when new
 * meta-data arrives from the server.
 *
 * \param inode regular file inode
 * \param md    new file metadata from MDS
 * - allocates cl_object if necessary,
 * - updates the layout, if the object was already there.
 */
int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
	struct lu_env *env;
	struct cl_inode_info *lli;
	struct cl_object *clob;
	struct lu_site *site;
	struct lu_fid *fid;
	struct cl_object_conf conf = {
		.coc_inode = inode,
		.u = {
			.coc_md = md
		}
	};
	int result = 0;
	int refcheck;

	LASSERT(md->body->valid & OBD_MD_FLID);
	LASSERT(S_ISREG(cl_inode_mode(inode)));

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	site = cl_i2sbi(inode)->ll_site;
	lli = cl_i2info(inode);
	fid = &lli->lli_fid;
	LASSERT(fid_is_sane(fid));

	if (lli->lli_clob == NULL) {
		/*
		 * clob is a slave of the inode; an empty lli_clob means a
		 * new inode, for which there is no clob in cache with the
		 * given fid, so it is unnecessary to perform
		 * lookup-alloc-lookup-insert - just alloc and insert
		 * directly.
		 */
		LASSERT(inode->i_state & I_NEW);
		conf.coc_lu.loc_flags = LOC_F_NEW;
		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
				      fid, &conf);
		if (!IS_ERR(clob)) {
			/*
			 * No locking is necessary, as the new inode is
			 * locked by the I_NEW bit.
			 */
			lli->lli_clob = clob;
			lli->lli_has_smd = lsm_has_objects(md->lsm);
			lu_object_ref_add(&clob->co_lu, "inode", inode);
		} else {
			result = PTR_ERR(clob);
		}
	} else {
		result = cl_conf_set(env, lli->lli_clob, &conf);
	}

	cl_env_put(env, &refcheck);

	if (result != 0)
		CERROR("Failure to initialize cl object "DFID": %d\n",
		       PFID(fid), result);
	return result;
}

/**
 * Wait for others to drop their references to the object first, then drop
 * the last one, which will lead to the object being destroyed immediately.
 * Must be called after cl_object_kill() against this object.
 *
 * The reason we want to do this is: destroying the top object will wait
 * for sub-objects being destroyed first, so we can't let the bottom layer
 * (e.g. from ASTs) initiate top-object destruction, which may deadlock.
 * See bz22520.
 */
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *header = obj->co_lu.lo_header;
	wait_queue_t waiter;

	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
		struct lu_site_bkt_data *bkt;

		bkt = lu_site_bkt_from_fid(site, &header->loh_fid);

		init_waitqueue_entry(&waiter, current);
		add_wait_queue(&bkt->lsb_marche_funebre, &waiter);

		while (1) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&header->loh_ref) == 1)
				break;
			schedule();
		}

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
	}

	cl_object_put(env, obj);
}

void cl_inode_fini(struct inode *inode)
{
	struct lu_env *env;
	struct cl_inode_info *lli = cl_i2info(inode);
	struct cl_object *clob = lli->lli_clob;
	int refcheck;
	int emergency;

	if (clob != NULL) {
		void *cookie;

		cookie = cl_env_reenter();
		env = cl_env_get(&refcheck);
		emergency = IS_ERR(env);
		if (emergency) {
			mutex_lock(&ccc_inode_fini_guard);
			LASSERT(ccc_inode_fini_env != NULL);
			cl_env_implant(ccc_inode_fini_env, &refcheck);
			env = ccc_inode_fini_env;
		}
		/*
		 * The cl_object cache is a slave to the inode cache (which,
		 * in turn, is a slave to the dentry cache); don't keep a
		 * cl_object in memory when its master is evicted.
		 */
		cl_object_kill(env, clob);
		lu_object_ref_del(&clob->co_lu, "inode", inode);
		cl_object_put_last(env, clob);
		lli->lli_clob = NULL;
		if (emergency) {
			cl_env_unplant(ccc_inode_fini_env, &refcheck);
			mutex_unlock(&ccc_inode_fini_guard);
		} else {
			cl_env_put(env, &refcheck);
		}
		cl_env_reexit(cookie);
	}
}

/**
 * Return the IF_* type for the given lu_dirent entry. The IF_* flag should
 * be converted to the particular OS file type in the platform llite module.
 */
__u16 ll_dirent_type_get(struct lu_dirent *ent)
{
	__u16 type = 0;
	struct luda_type *lt;
	int len = 0;

	if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

		len = le16_to_cpu(ent->lde_namelen);
		len = (len + align) & ~align;
		lt = (void *)ent->lde_name + len;
		type = IFTODT(le16_to_cpu(lt->lt_type));
	}
	return type;
}
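
/*
 * Example of the alignment arithmetic above (hypothetical entry, assuming
 * sizeof(struct luda_type) == 2): with lde_namelen == 5, align == 1 and
 * len == (5 + 1) & ~1 == 6, so the luda_type record is read from a 2-byte
 * aligned offset just past the name.
 */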

/**
 * Build the inode number from the passed @fid.
 */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
	if (BITS_PER_LONG == 32 || api32)
		return fid_flatten32(fid);
	else
		return fid_flatten(fid);
}

/**
 * Build the inode generation from the passed @fid. If our FID overflows
 * the 32-bit inode number then return a non-zero generation to distinguish
 * them.
 */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
	__u32 gen;

	if (fid_is_igif(fid)) {
		gen = lu_igif_gen(fid);
		return gen;
	}

	gen = (fid_flatten(fid) >> 32);
	return gen;
}

/*
 * The lsm is unreliable after the hsm implementation as the layout can be
 * changed at any time. This is only to support old, non-clio-ized
 * interfaces. It will cause a deadlock if clio operations are called with
 * this extra layout refcount because, in case the layout changes during
 * the IO, ll_layout_refresh() will have to wait for the refcount to become
 * zero to destroy the older layout.
 *
 * Notice that the lsm returned by this function may not be valid unless
 * called inside the layout lock - MDS_INODELOCK_LAYOUT.
 */
struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
{
	return lov_lsm_get(cl_i2info(inode)->lli_clob);
}

inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
{
	lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
}