/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl code shared between vvp and liblustre (and other Lustre clients in the
 * future).
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/libcfs/libcfs.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>

#include <obd.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <lustre_lite.h>
#include <lustre_dlm.h>
#include <lustre_ver.h>
#include <lustre_mdc.h>
#include <cl_object.h>

#include <lclient.h>

#include "../llite/llite_internal.h"

const struct cl_req_operations ccc_req_ops;

/*
 * ccc_ prefix stands for "Common Client Code".
 */

static struct kmem_cache *ccc_lock_kmem;
static struct kmem_cache *ccc_object_kmem;
static struct kmem_cache *ccc_thread_kmem;
static struct kmem_cache *ccc_session_kmem;
static struct kmem_cache *ccc_req_kmem;

static struct lu_kmem_descr ccc_caches[] = {
	{
		.ckd_cache = &ccc_lock_kmem,
		.ckd_name = "ccc_lock_kmem",
		.ckd_size = sizeof(struct ccc_lock)
	},
	{
		.ckd_cache = &ccc_object_kmem,
		.ckd_name = "ccc_object_kmem",
		.ckd_size = sizeof(struct ccc_object)
	},
	{
		.ckd_cache = &ccc_thread_kmem,
		.ckd_name = "ccc_thread_kmem",
		.ckd_size = sizeof(struct ccc_thread_info),
	},
	{
		.ckd_cache = &ccc_session_kmem,
		.ckd_name = "ccc_session_kmem",
		.ckd_size = sizeof(struct ccc_session)
	},
	{
		.ckd_cache = &ccc_req_kmem,
		.ckd_name = "ccc_req_kmem",
		.ckd_size = sizeof(struct ccc_req)
	},
	{
		.ckd_cache = NULL
	}
};

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

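/*
 * Per-thread and per-session context storage. The lu_context machinery
 * invokes the lct_init/lct_fini hooks below whenever a context referencing
 * ccc_key or ccc_session_key is entered or exited. A minimal retrieval
 * sketch (assuming the standard lu_context_key_get() API; this is what
 * helpers such as ccc_env_info()/ccc_env_io() wrap):
 *
 *	struct ccc_thread_info *info;
 *
 *	info = lu_context_key_get(&env->le_ctx, &ccc_key);
 */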
void *ccc_key_init(const struct lu_context *ctx,
		   struct lu_context_key *key)
{
	struct ccc_thread_info *info;

	OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, __GFP_IO);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}

void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data)
{
	struct ccc_thread_info *info = data;

	OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
}

void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key)
{
	struct ccc_session *session;

	OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, __GFP_IO);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}

void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data)
{
	struct ccc_session *session = data;

	OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
}

struct lu_context_key ccc_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = ccc_key_init,
	.lct_fini = ccc_key_fini
};

struct lu_context_key ccc_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = ccc_session_key_init,
	.lct_fini = ccc_session_key_fini
};


/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
// LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key);

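/*
 * Glue the ccc device into a client device stack: remember the next
 * (lower) device and initialize it recursively. A sketch of the resulting
 * topology, assuming the usual vvp-over-lov client stack:
 *
 *	vvp/ccc device --cdv_next--> lov device --> osc devices
 */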
int ccc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next)
{
	struct ccc_device *vdv;
	int rc;

	vdv = lu2ccc_dev(d);
	vdv->cdv_next = lu2cl_dev(next);

	LASSERT(d->ld_site != NULL && next->ld_type != NULL);
	next->ld_site = d->ld_site;
	rc = next->ld_type->ldt_ops->ldto_device_init(
			env, next, next->ld_type->ldt_name, NULL);
	if (rc == 0) {
		lu_device_get(next);
		lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
	}
	RETURN(rc);
}

struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d)
{
	return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
}

struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops)
{
	struct ccc_device *vdv;
	struct lu_device *lud;
	struct cl_site *site;
	int rc;

	OBD_ALLOC_PTR(vdv);
	if (vdv == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	lud = &vdv->cdv_cl.cd_lu_dev;
	cl_device_init(&vdv->cdv_cl, t);
	ccc2lu_dev(vdv)->ld_ops = luops;
	vdv->cdv_cl.cd_ops = clops;

	OBD_ALLOC_PTR(site);
	if (site != NULL) {
		rc = cl_site_init(site, &vdv->cdv_cl);
		if (rc == 0)
			rc = lu_site_init_finish(&site->cs_lu);
		else {
			LASSERT(lud->ld_site == NULL);
			CERROR("Cannot init lu_site, rc %d.\n", rc);
			OBD_FREE_PTR(site);
		}
	} else
		rc = -ENOMEM;
	if (rc != 0) {
		ccc_device_free(env, lud);
		lud = ERR_PTR(rc);
	}
	RETURN(lud);
}

struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d)
{
	struct ccc_device *vdv = lu2ccc_dev(d);
	struct cl_site *site = lu2cl_site(d->ld_site);
	struct lu_device *next = cl2lu_dev(vdv->cdv_next);

	if (d->ld_site != NULL) {
		cl_site_fini(site);
		OBD_FREE_PTR(site);
	}
	cl_device_fini(lu2cl_dev(d));
	OBD_FREE_PTR(vdv);
	return next;
}

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req)
{
	struct ccc_req *vrq;
	int result;

	OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, __GFP_IO);
	if (vrq != NULL) {
		cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
		result = 0;
	} else
		result = -ENOMEM;
	return result;
}

/**
 * An `emergency' environment used by cl_inode_fini() when cl_env_get()
 * fails. Access to this environment is serialized by the
 * ccc_inode_fini_guard mutex.
 */
static struct lu_env *ccc_inode_fini_env = NULL;

/**
 * A mutex serializing calls to cl_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;

int ccc_global_init(struct lu_device_type *device_type)
{
	int result;

	result = lu_kmem_init(ccc_caches);
	if (result)
		return result;

	result = lu_device_type_init(device_type);
	if (result)
		goto out_kmem;

	ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
					  LCT_REMEMBER|LCT_NOREF);
	if (IS_ERR(ccc_inode_fini_env)) {
		result = PTR_ERR(ccc_inode_fini_env);
		goto out_device;
	}

	ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
	return 0;
out_device:
	lu_device_type_fini(device_type);
out_kmem:
	lu_kmem_fini(ccc_caches);
	return result;
}

void ccc_global_fini(struct lu_device_type *device_type)
{
	if (ccc_inode_fini_env != NULL) {
		cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
		ccc_inode_fini_env = NULL;
	}
	lu_device_type_fini(device_type);
	lu_kmem_fini(ccc_caches);
}

/*****************************************************************************
 *
 * Object operations.
 *
 */

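/*
 * Allocate a ccc_object and make it the top of a new lu_object stack:
 * the embedded cl_object_header becomes the stack's header, and the
 * lower-layer objects are attached underneath it by ccc_object_init()
 * below.
 */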
struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *unused,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops)
{
	struct ccc_object *vob;
	struct lu_object *obj;

	OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, __GFP_IO);
	if (vob != NULL) {
		struct cl_object_header *hdr;

		obj = ccc2lu(vob);
		hdr = &vob->cob_header;
		cl_object_header_init(hdr);
		lu_object_init(obj, &hdr->coh_lu, dev);
		lu_object_add_top(&hdr->coh_lu, obj);

		vob->cob_cl.co_ops = clops;
		obj->lo_ops = luops;
	} else
		obj = NULL;
	return obj;
}

int ccc_object_init0(const struct lu_env *env,
		     struct ccc_object *vob,
		     const struct cl_object_conf *conf)
{
	vob->cob_inode = conf->coc_inode;
	vob->cob_transient_pages = 0;
	cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
	return 0;
}

int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf)
{
	struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
	struct ccc_object *vob = lu2ccc(obj);
	struct lu_object *below;
	struct lu_device *under;
	int result;

	under = &dev->cdv_next->cd_lu_dev;
	below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
	if (below != NULL) {
		const struct cl_object_conf *cconf;

		cconf = lu2cl_conf(conf);
		INIT_LIST_HEAD(&vob->cob_pending_list);
		lu_object_add(obj, below);
		result = ccc_object_init0(env, vob, cconf);
	} else
		result = -ENOMEM;
	return result;
}

void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
{
	struct ccc_object *vob = lu2ccc(obj);

	lu_object_fini(obj);
	lu_object_header_fini(obj->lo_header);
	OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
}

int ccc_lock_init(const struct lu_env *env,
		  struct cl_object *obj, struct cl_lock *lock,
		  const struct cl_io *unused,
		  const struct cl_lock_operations *lkops)
{
	struct ccc_lock *clk;
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, __GFP_IO);
	if (clk != NULL) {
		cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
		result = 0;
	} else
		result = -ENOMEM;
	return result;
}

int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_attr *attr, unsigned valid)
{
	return 0;
}

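/*
 * Fill \a lvb with the inode's current [amc]times for glimpse handling;
 * see the LU-417 note inside for why lvb_blocks may be fixed up from the
 * dirty page count.
 */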
int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb)
{
	struct inode *inode = ccc_object_inode(obj);

	lvb->lvb_mtime = cl_inode_mtime(inode);
	lvb->lvb_atime = cl_inode_atime(inode);
	lvb->lvb_ctime = cl_inode_ctime(inode);
	/*
	 * LU-417: Add the dirty page block count lest i_blocks report 0;
	 * otherwise "cp" or "tar" on a remote node may think the file is
	 * completely sparse and skip it.
	 */
	if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
		lvb->lvb_blocks = dirty_cnt(inode);
	RETURN(0);
}

int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_object_conf *conf)
{
	/* TODO: destroy all pages attached to this object. */
	return 0;
}

static void ccc_object_size_lock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	cl_isize_lock(inode);
	cl_object_attr_lock(obj);
}

static void ccc_object_size_unlock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	cl_object_attr_unlock(obj);
	cl_isize_unlock(inode);
}

/*****************************************************************************
 *
 * Page operations.
 *
 */

struct page *ccc_page_vmpage(const struct lu_env *env,
			     const struct cl_page_slice *slice)
{
	return cl2vm_page(slice);
}

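/*
 * Decide whether \a slice's page is already covered by a lock of \a io:
 * returns -EBUSY when it is (a group lock is held, or a CLM_READ lock
 * over the page index is found in io's "done" lock set), and 0 otherwise.
 */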
int ccc_page_is_under_lock(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *io)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
	struct cl_page *page = slice->cpl_page;
	int result;

	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
	    io->ci_type == CIT_FAULT) {
		if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
			result = -EBUSY;
		else {
			desc->cld_start = page->cp_index;
			desc->cld_end = page->cp_index;
			desc->cld_obj = page->cp_obj;
			desc->cld_mode = CLM_READ;
			result = cl_queue_match(&io->ci_lockset.cls_done,
						desc) ? -EBUSY : 0;
		}
	} else
		result = 0;
	RETURN(result);
}

int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}

void ccc_transient_page_verify(const struct cl_page *page)
{
}

int ccc_transient_page_own(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *unused,
			   int nonblock)
{
	ccc_transient_page_verify(slice->cpl_page);
	return 0;
}

void ccc_transient_page_assume(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_unassume(const struct lu_env *env,
				 const struct cl_page_slice *slice,
				 struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_disown(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_discard(const struct lu_env *env,
				const struct cl_page_slice *slice,
				struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	ccc_transient_page_verify(slice->cpl_page);

	/*
	 * For transient pages, remove them from the radix tree.
	 */
	cl_page_delete(env, page);
}

int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	/* transient pages should always be sent. */
	RETURN(0);
}

/*****************************************************************************
 *
 * Lock operations.
 *
 */

void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
}

void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
	struct ccc_lock *clk = cl2ccc_lock(slice);

	OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
}

int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *unused, __u32 enqflags)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

/**
 * Implementation of the cl_lock_operations::clo_fits_into() method for the
 * ccc layer. This function is executed every time an IO finds an existing
 * lock in the lock cache while creating a new lock. It has to decide
 * whether the cached lock "fits" the IO.
 *
 * \param slice lock to be checked
 * \param io    IO that wants a lock.
 *
 * \see lov_lock_fits_into().
 */
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io)
{
	const struct cl_lock *lock = slice->cls_lock;
	const struct cl_lock_descr *descr = &lock->cll_descr;
	const struct ccc_io *cio = ccc_env_io(env);
	int result;

	/*
	 * Work around a DLM peculiarity: it assumes that a glimpse
	 * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read
	 * lock when asked for an LCK_PW lock with LDLM_FL_HAS_INTENT set.
	 * Make sure that a glimpse doesn't get a CLM_WRITE top-lock, so
	 * that it doesn't enqueue CLM_WRITE sub-locks.
	 */
	if (cio->cui_glimpse)
		result = descr->cld_mode != CLM_WRITE;

	/*
	 * Also, don't match incomplete write locks for read, otherwise read
	 * would enqueue missing sub-locks in the write mode.
	 */
	else if (need->cld_mode != descr->cld_mode)
		result = lock->cll_state >= CLS_ENQUEUED;
	else
		result = 1;
	RETURN(result);
}

/**
 * Implements the cl_lock_operations::clo_state() method for the ccc layer,
 * invoked whenever the lock state changes. Transfers object attributes that
 * might have been updated as a result of lock acquisition into the inode.
 */
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state)
{
	struct cl_lock *lock = slice->cls_lock;

	/*
	 * Refresh inode attributes when the lock is moving into the CLS_HELD
	 * state, and only when this is a result of a real enqueue, rather
	 * than of finding the lock in the cache.
	 */
	if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
		struct cl_object *obj;
		struct inode *inode;

		obj = slice->cls_obj;
		inode = ccc_object_inode(obj);

		/* vmtruncate() sets the i_size under both a DLM lock and
		 * the ll_inode_size_lock(). If we don't take the
		 * ll_inode_size_lock() here we can match the DLM lock and
		 * reset i_size; generic_file_write() can then trust the
		 * stale i_size when doing appending writes and effectively
		 * cancel the result of the truncate. Taking the
		 * ll_inode_size_lock() after the enqueue maintains the
		 * DLM -> ll_inode_size_lock() acquiring order. */
		if (lock->cll_descr.cld_start == 0 &&
		    lock->cll_descr.cld_end == CL_PAGE_EOF)
			cl_merge_lvb(env, inode);
	}
}

/*****************************************************************************
 *
 * io operations.
 *
 */

void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
}

int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
	struct cl_object *obj = io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

	memset(&cio->cui_link, 0, sizeof(cio->cui_link));

	if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
		descr->cld_mode = CLM_GROUP;
		descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
	} else {
		descr->cld_mode = mode;
	}
	descr->cld_obj = obj;
	descr->cld_start = start;
	descr->cld_end = end;
	descr->cld_enq_flags = enqflags;

	cl_io_lock_add(env, io, &cio->cui_link);
	RETURN(0);
}

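/*
 * Clip the user iovec to exactly crw_count bytes for the current cl_io
 * chunk: the segment crossing the boundary is shortened and its original
 * length saved in cui_iov_olen, so that ccc_io_advance() can restore it.
 * Worked example (hypothetical numbers): for two 4096-byte segments and
 * crw_count == 6144, the second segment is clipped to 2048 bytes and
 * cui_iov_olen is set to 4096.
 */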
void ccc_io_update_iov(const struct lu_env *env,
		       struct ccc_io *cio, struct cl_io *io)
{
	int i;
	size_t size = io->u.ci_rw.crw_count;

	cio->cui_iov_olen = 0;
	if (!cl_is_normalio(env, io) || cio->cui_tot_nrsegs == 0)
		return;

	for (i = 0; i < cio->cui_tot_nrsegs; i++) {
		struct iovec *iv = &cio->cui_iov[i];

		if (iv->iov_len < size)
			size -= iv->iov_len;
		else {
			if (iv->iov_len > size) {
				cio->cui_iov_olen = iv->iov_len;
				iv->iov_len = size;
			}
			break;
		}
	}

	cio->cui_nrsegs = i + 1;
	LASSERTF(cio->cui_tot_nrsegs >= cio->cui_nrsegs,
		 "tot_nrsegs: %lu, nrsegs: %lu\n",
		 cio->cui_tot_nrsegs, cio->cui_nrsegs);
}

int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end)
{
	struct cl_object *obj = io->ci_obj;

	return ccc_io_one_lock_index(env, io, enqflags, mode,
				     cl_index(obj, start), cl_index(obj, end));
}

void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	CLOBINVRNT(env, ios->cis_io->ci_obj,
		   ccc_object_invariant(ios->cis_io->ci_obj));
}

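/*
 * Advance the iovec past the \a nob bytes just transferred. If the last
 * segment was clipped by ccc_io_update_iov(), either step into the
 * remainder of that segment (io->ci_continue) or restore its original
 * length so that a restarted IO sees the unclipped iovec.
 */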
void ccc_io_advance(const struct lu_env *env,
		    const struct cl_io_slice *ios,
		    size_t nob)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = ios->cis_io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	if (!cl_is_normalio(env, io))
		return;

	LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
	LASSERT(cio->cui_tot_count >= nob);

	cio->cui_iov += cio->cui_nrsegs;
	cio->cui_tot_nrsegs -= cio->cui_nrsegs;
	cio->cui_tot_count -= nob;

	/* update the iov */
	if (cio->cui_iov_olen > 0) {
		struct iovec *iv;

		cio->cui_iov--;
		cio->cui_tot_nrsegs++;
		iv = &cio->cui_iov[0];
		if (io->ci_continue) {
			iv->iov_base += iv->iov_len;
			LASSERT(cio->cui_iov_olen > iv->iov_len);
			iv->iov_len = cio->cui_iov_olen - iv->iov_len;
		} else {
			/* restore the iov_len, in case of restarted io. */
			iv->iov_len = cio->cui_iov_olen;
		}
		cio->cui_iov_olen = 0;
	}
}

/**
 * Helper function that, if necessary, adjusts the file size (inode->i_size)
 * when the position at offset \a pos is accessed. The file size can be
 * arbitrarily stale on a Lustre client, but the client at least knows KMS.
 * If the accessed area is inside [0, KMS], set the file size to KMS,
 * otherwise glimpse the file size.
 *
 * Locking: cl_isize_lock is used to serialize changes to the inode size and
 * to protect consistency between the inode size and the cl_object
 * attributes. cl_object_size_lock() protects consistency between cl_attrs
 * of the top-object and sub-objects.
 */
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	struct inode *inode = ccc_object_inode(obj);
	loff_t pos = start + count - 1;
	loff_t kms;
	int result;

	/*
	 * Consistency guarantees: the following possibilities exist for the
	 * relation between the region being accessed and the real file size
	 * at this moment:
	 *
	 * (A): the region is completely inside of the file;
	 *
	 * (B-x): x bytes of the region are inside of the file, the rest is
	 * outside;
	 *
	 * (C): the region is completely outside of the file.
	 *
	 * This classification is stable under the DLM lock already acquired
	 * by the caller, because to change the class, another client has to
	 * take a DLM lock conflicting with our lock. Also, any updates to
	 * ->i_size by other threads on this client are serialized by
	 * ll_inode_size_lock(). This guarantees that short reads are handled
	 * correctly in the face of concurrent writes and truncates.
	 */
	ccc_object_size_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	if (result == 0) {
		kms = attr->cat_kms;
		if (pos > kms) {
			/*
			 * A glimpse is necessary to determine whether we
			 * return a short read (B) or some zeroes at the end
			 * of the buffer (C).
			 */
			ccc_object_size_unlock(obj);
			result = cl_glimpse_lock(env, io, inode, obj, 0);
			if (result == 0 && exceed != NULL) {
				/* If the page index being accessed exceeds
				 * the end-of-file page index, return
				 * directly; do not expect the kernel to
				 * check this case correctly
				 * (linux-2.6.18-128.1.1 fails to do so).
				 * --bug 17336 */
				loff_t size = cl_isize_read(inode);
				unsigned long cur_index = start >> PAGE_CACHE_SHIFT;

				if ((size == 0 && cur_index != 0) ||
				    (((size - 1) >> PAGE_CACHE_SHIFT) < cur_index))
					*exceed = 1;
			}
			return result;
		} else {
			/*
			 * The region is within kms and, hence, within the
			 * real file size (A). We need to increase i_size to
			 * cover the read region so that generic_file_read()
			 * will do its job, but that doesn't mean the kms
			 * size is _correct_; it is only the _minimum_ size.
			 * If someone does a stat they will get the correct
			 * size which will always be >= the kms value here.
			 * b=11081
			 */
			if (cl_isize_read(inode) < kms) {
				cl_isize_write_nolock(inode, kms);
				CDEBUG(D_VFSTRACE,
				       DFID" updating i_size "LPU64"\n",
				       PFID(lu_object_fid(&obj->co_lu)),
				       (__u64)cl_isize_read(inode));
			}
		}
	}
	ccc_object_size_unlock(obj);
	return result;
}

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret)
{
	struct ccc_req *vrq;

	if (ioret > 0)
		cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);

	vrq = cl2ccc_req(slice);
	OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the ccc
 * layer. ccc is responsible for
 *
 * - o_[mac]time
 *
 * - o_mode
 *
 * - o_parent_seq
 *
 * - o_[ug]id
 *
 * - o_parent_oid
 *
 * - o_parent_ver
 *
 * - o_ioepoch,
 *
 * and capability.
 */
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *attr, obd_valid flags)
{
	struct inode *inode;
	struct obdo *oa;
	obd_flag valid_flags;

	oa = attr->cra_oa;
	inode = ccc_object_inode(obj);
	valid_flags = OBD_MD_FLTYPE;

	if ((flags & OBD_MD_FLOSSCAPA) != 0) {
		LASSERT(attr->cra_capa == NULL);
		attr->cra_capa = cl_capa_lookup(inode,
						slice->crs_req->crq_type);
	}

	if (slice->crs_req->crq_type == CRT_WRITE) {
		if (flags & OBD_MD_FLEPOCH) {
			oa->o_valid |= OBD_MD_FLEPOCH;
			oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
			valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
				       OBD_MD_FLUID | OBD_MD_FLGID;
		}
	}
	obdo_from_inode(oa, inode, valid_flags & flags);
	obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
	memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
	       JOBSTATS_JOBID_SIZE);
}

const struct cl_req_operations ccc_req_ops = {
	.cro_attr_set = ccc_req_attr_set,
	.cro_completion = ccc_req_completion
};

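/**
 * Apply the attributes in \a attr to the OST objects backing \a inode by
 * running a CIT_SETATTR cl_io, restarting the whole io from scratch when
 * the layout changes underneath it (io->ci_need_restart).
 */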
int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa)
{
	struct lu_env *env;
	struct cl_io *io;
	int result;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		RETURN(PTR_ERR(env));

	io = ccc_env_thread_io(env);
	io->ci_obj = cl_i2info(inode)->lli_clob;

	io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
	io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
	io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
	io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
	io->u.ci_setattr.sa_valid = attr->ia_valid;
	io->u.ci_setattr.sa_capa = capa;

again:
	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
		struct ccc_io *cio = ccc_env_io(env);

		if (attr->ia_valid & ATTR_FILE)
			/* populate the file descriptor for ftruncate to
			 * honor the group lock - see LU-787 */
			cio->cui_fd = cl_iattr2fd(inode, attr);

		result = cl_io_loop(env, io);
	} else {
		result = io->ci_result;
	}
	cl_io_fini(env, io);
	if (unlikely(io->ci_need_restart))
		goto again;
	cl_env_put(env, &refcheck);
	RETURN(result);
}

/*****************************************************************************
 *
 * Type conversions.
 *
 */

struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
{
	return &vdv->cdv_cl.cd_lu_dev;
}

struct ccc_device *lu2ccc_dev(const struct lu_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
}

struct ccc_device *cl2ccc_dev(const struct cl_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl);
}

struct lu_object *ccc2lu(struct ccc_object *vob)
{
	return &vob->cob_cl.co_lu;
}

struct ccc_object *lu2ccc(const struct lu_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl.co_lu);
}

struct ccc_object *cl2ccc(const struct cl_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl);
}

struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
{
	return container_of(slice, struct ccc_lock, clk_cl);
}

struct ccc_io *cl2ccc_io(const struct lu_env *env,
			 const struct cl_io_slice *slice)
{
	struct ccc_io *cio;

	cio = container_of(slice, struct ccc_io, cui_cl);
	LASSERT(cio == ccc_env_io(env));
	return cio;
}

struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct ccc_req, crq_cl);
}

struct page *cl2vm_page(const struct cl_page_slice *slice)
{
	return cl2ccc_page(slice)->cpg_page;
}

/*****************************************************************************
 *
 * Accessors.
 *
 */

int ccc_object_invariant(const struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);
	struct cl_inode_info *lli = cl_i2info(inode);

	return (S_ISREG(cl_inode_mode(inode)) ||
		/* i_mode of an unlinked inode is zeroed. */
		cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
}

struct inode *ccc_object_inode(const struct cl_object *obj)
{
	return cl2ccc(obj)->cob_inode;
}

/**
 * Returns a pointer to the cl_page associated with \a vmpage, without
 * acquiring an additional reference to the resulting page. This is an
 * unsafe version of cl_vmpage_page() that can only be used under the
 * vmpage lock.
 */
struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
{
	KLASSERT(PageLocked(vmpage));
	return (struct cl_page *)vmpage->private;
}

/**
 * Initialize or update CLIO structures for regular files when new
 * meta-data arrives from the server.
 *
 * \param inode regular file inode
 * \param md    new file metadata from MDS
 * - allocates a cl_object if necessary,
 * - updates the layout, if the object was already here.
 */
int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
	struct lu_env *env;
	struct cl_inode_info *lli;
	struct cl_object *clob;
	struct lu_site *site;
	struct lu_fid *fid;
	struct cl_object_conf conf = {
		.coc_inode = inode,
		.u = {
			.coc_md = md
		}
	};
	int result = 0;
	int refcheck;

	LASSERT(md->body->valid & OBD_MD_FLID);
	LASSERT(S_ISREG(cl_inode_mode(inode)));

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	site = cl_i2sbi(inode)->ll_site;
	lli = cl_i2info(inode);
	fid = &lli->lli_fid;
	LASSERT(fid_is_sane(fid));

	if (lli->lli_clob == NULL) {
		/* clob is a slave of the inode: an empty lli_clob means this
		 * is a new inode, for which no clob exists in the cache with
		 * the given fid, so it is unnecessary to perform
		 * lookup-alloc-lookup-insert, just alloc and insert
		 * directly. */
		LASSERT(inode->i_state & I_NEW);
		conf.coc_lu.loc_flags = LOC_F_NEW;
		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
				      fid, &conf);
		if (!IS_ERR(clob)) {
			/*
			 * No locking is necessary, as the new inode is
			 * locked by the I_NEW bit.
			 */
			lli->lli_clob = clob;
			lli->lli_has_smd = lsm_has_objects(md->lsm);
			lu_object_ref_add(&clob->co_lu, "inode", inode);
		} else
			result = PTR_ERR(clob);
	} else {
		result = cl_conf_set(env, lli->lli_clob, &conf);
	}

	cl_env_put(env, &refcheck);

	if (result != 0)
		CERROR("Failure to initialize cl object "DFID": %d\n",
		       PFID(fid), result);
	return result;
}

/**
 * Wait for others to drop their references to the object first, then drop
 * the last one, which causes the object to be destroyed immediately.
 * Must be called after cl_object_kill() against this object.
 *
 * The reason we want to do this is: destroying the top object will wait for
 * sub-objects being destroyed first, so we can't let the bottom layer (e.g.
 * from ASTs) initiate the top object destruction, which may deadlock. See
 * bz22520.
 */
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *header = obj->co_lu.lo_header;
	wait_queue_t waiter;

	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
		struct lu_site_bkt_data *bkt;

		bkt = lu_site_bkt_from_fid(site, &header->loh_fid);

		init_waitqueue_entry_current(&waiter);
		add_wait_queue(&bkt->lsb_marche_funebre, &waiter);

		while (1) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&header->loh_ref) == 1)
				break;
			waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
	}

	cl_object_put(env, obj);
}

void cl_inode_fini(struct inode *inode)
{
	struct lu_env *env;
	struct cl_inode_info *lli = cl_i2info(inode);
	struct cl_object *clob = lli->lli_clob;
	int refcheck;
	int emergency;

	if (clob != NULL) {
		void *cookie;

		cookie = cl_env_reenter();
		env = cl_env_get(&refcheck);
		emergency = IS_ERR(env);
		if (emergency) {
			mutex_lock(&ccc_inode_fini_guard);
			LASSERT(ccc_inode_fini_env != NULL);
			cl_env_implant(ccc_inode_fini_env, &refcheck);
			env = ccc_inode_fini_env;
		}
		/*
		 * The cl_object cache is a slave to the inode cache (which,
		 * in turn, is a slave to the dentry cache); don't keep a
		 * cl_object in memory when its master is evicted.
		 */
		cl_object_kill(env, clob);
		lu_object_ref_del(&clob->co_lu, "inode", inode);
		cl_object_put_last(env, clob);
		lli->lli_clob = NULL;
		if (emergency) {
			cl_env_unplant(ccc_inode_fini_env, &refcheck);
			mutex_unlock(&ccc_inode_fini_guard);
		} else
			cl_env_put(env, &refcheck);
		cl_env_reexit(cookie);
	}
}

/**
 * Return the IF_* type for a given lu_dirent entry.
 * The IF_* flag should be converted to the particular OS file type by the
 * platform llite module.
 */
__u16 ll_dirent_type_get(struct lu_dirent *ent)
{
	__u16 type = 0;
	struct luda_type *lt;
	int len = 0;

	if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

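		/*
		 * The struct luda_type carrying the type bits is stored
		 * right after the name, with the name length padded up to
		 * luda_type alignment; compute that padded length before
		 * dereferencing lt.
		 */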
		len = le16_to_cpu(ent->lde_namelen);
		len = (len + align) & ~align;
		lt = (void *)ent->lde_name + len;
		type = IFTODT(le16_to_cpu(lt->lt_type));
	}
	return type;
}

/**
 * Build an inode number from the passed \a fid.
 */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
	if (BITS_PER_LONG == 32 || api32)
		RETURN(fid_flatten32(fid));
	else
		RETURN(fid_flatten(fid));
}

/**
 * Build an inode generation from the passed \a fid. If our FID overflows
 * the 32-bit inode number, return a non-zero generation to distinguish
 * them.
 */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
	__u32 gen;

	if (fid_is_igif(fid)) {
		gen = lu_igif_gen(fid);
		RETURN(gen);
	}

	gen = (fid_flatten(fid) >> 32);
	RETURN(gen);
}
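
/*
 * Note (an informal summary of the two helpers above): on 32-bit or api32
 * clients the inode number is the 32-bit fid_flatten32() value, while the
 * generation carries the high 32 bits of the 64-bit fid_flatten() value,
 * so the (ino, generation) pair helps distinguish FIDs that collide in
 * the 32-bit inode number space.
 */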

/*
 * The lsm is unreliable after the HSM implementation, as the layout can be
 * changed at any time. This is only to support old, non-clio-ized
 * interfaces. It will cause a deadlock if clio operations are called with
 * this extra layout refcount, because in case the layout changes during
 * the IO, ll_layout_refresh() will have to wait for the refcount to become
 * zero to destroy the older layout.
 *
 * Notice that the lsm returned by this function may not be valid unless
 * called inside the layout lock - MDS_INODELOCK_LAYOUT.
 */
struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
{
	return lov_lsm_get(cl_i2info(inode)->lli_clob);
}

inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
{
	lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
}