staging/lustre/mdc: use cl_max_mds_md to pack getattr RPC
drivers/staging/lustre/lustre/lclient/lcommon_cl.c
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * cl code shared between vvp and liblustre (and other Lustre clients in the
37 * future).
38 *
39 * Author: Nikita Danilov <nikita.danilov@sun.com>
40 */
41
42#define DEBUG_SUBSYSTEM S_LLITE
43
44# include <linux/libcfs/libcfs.h>
45# include <linux/fs.h>
46# include <linux/sched.h>
47# include <linux/mm.h>
48# include <linux/quotaops.h>
49# include <linux/highmem.h>
50# include <linux/pagemap.h>
51# include <linux/rbtree.h>
52
53#include <obd.h>
54#include <obd_support.h>
55#include <lustre_fid.h>
56#include <lustre_lite.h>
57#include <lustre_dlm.h>
58#include <lustre_ver.h>
59#include <lustre_mdc.h>
60#include <cl_object.h>
61
62#include <lclient.h>
63
64#include "../llite/llite_internal.h"
65
66const struct cl_req_operations ccc_req_ops;
67
68/*
69 * ccc_ prefix stands for "Common Client Code".
70 */
71
72static struct kmem_cache *ccc_lock_kmem;
73static struct kmem_cache *ccc_object_kmem;
74static struct kmem_cache *ccc_thread_kmem;
75static struct kmem_cache *ccc_session_kmem;
76static struct kmem_cache *ccc_req_kmem;
77
78static struct lu_kmem_descr ccc_caches[] = {
79 {
80 .ckd_cache = &ccc_lock_kmem,
81 .ckd_name = "ccc_lock_kmem",
 82         .ckd_size = sizeof(struct ccc_lock)
83 },
84 {
85 .ckd_cache = &ccc_object_kmem,
86 .ckd_name = "ccc_object_kmem",
 87         .ckd_size = sizeof(struct ccc_object)
88 },
89 {
90 .ckd_cache = &ccc_thread_kmem,
91 .ckd_name = "ccc_thread_kmem",
 92         .ckd_size = sizeof(struct ccc_thread_info),
93 },
94 {
95 .ckd_cache = &ccc_session_kmem,
96 .ckd_name = "ccc_session_kmem",
 97         .ckd_size = sizeof(struct ccc_session)
98 },
99 {
100 .ckd_cache = &ccc_req_kmem,
101 .ckd_name = "ccc_req_kmem",
 102         .ckd_size = sizeof(struct ccc_req)
103 },
104 {
105 .ckd_cache = NULL
106 }
107};
108
109/*****************************************************************************
110 *
111 * Vvp device and device type functions.
112 *
113 */
114
 115void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
116{
117 struct ccc_thread_info *info;
118
 119        OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS);
120 if (info == NULL)
121 info = ERR_PTR(-ENOMEM);
122 return info;
123}
124
125void ccc_key_fini(const struct lu_context *ctx,
126 struct lu_context_key *key, void *data)
127{
128 struct ccc_thread_info *info = data;
129 OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
130}
131
132void *ccc_session_key_init(const struct lu_context *ctx,
133 struct lu_context_key *key)
134{
135 struct ccc_session *session;
136
 137        OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS);
138 if (session == NULL)
139 session = ERR_PTR(-ENOMEM);
140 return session;
141}
142
143void ccc_session_key_fini(const struct lu_context *ctx,
144 struct lu_context_key *key, void *data)
145{
146 struct ccc_session *session = data;
147 OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
148}
149
150struct lu_context_key ccc_key = {
151 .lct_tags = LCT_CL_THREAD,
152 .lct_init = ccc_key_init,
153 .lct_fini = ccc_key_fini
154};
155
156struct lu_context_key ccc_session_key = {
157 .lct_tags = LCT_SESSION,
158 .lct_init = ccc_session_key_init,
159 .lct_fini = ccc_session_key_fini
160};
161
162
163/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
 164/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
165
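/*
 * Initialize the ccc device: record the next (lower) cl_device in
 * cdv_next, propagate the lu_site to the lower device, run the lower
 * device's own ldto_device_init(), and take a reference on it on
 * success.
 */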
166int ccc_device_init(const struct lu_env *env, struct lu_device *d,
167 const char *name, struct lu_device *next)
168{
169 struct ccc_device *vdv;
170 int rc;
171
172 vdv = lu2ccc_dev(d);
173 vdv->cdv_next = lu2cl_dev(next);
174
175 LASSERT(d->ld_site != NULL && next->ld_type != NULL);
176 next->ld_site = d->ld_site;
177 rc = next->ld_type->ldt_ops->ldto_device_init(
178 env, next, next->ld_type->ldt_name, NULL);
179 if (rc == 0) {
180 lu_device_get(next);
181 lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
182 }
 183        return rc;
184}
185
186struct lu_device *ccc_device_fini(const struct lu_env *env,
187 struct lu_device *d)
188{
189 return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
190}
191
192struct lu_device *ccc_device_alloc(const struct lu_env *env,
193 struct lu_device_type *t,
194 struct lustre_cfg *cfg,
195 const struct lu_device_operations *luops,
196 const struct cl_device_operations *clops)
197{
198 struct ccc_device *vdv;
199 struct lu_device *lud;
200 struct cl_site *site;
201 int rc;
202
203 OBD_ALLOC_PTR(vdv);
204 if (vdv == NULL)
 205                return ERR_PTR(-ENOMEM);
206
207 lud = &vdv->cdv_cl.cd_lu_dev;
208 cl_device_init(&vdv->cdv_cl, t);
209 ccc2lu_dev(vdv)->ld_ops = luops;
210 vdv->cdv_cl.cd_ops = clops;
211
212 OBD_ALLOC_PTR(site);
213 if (site != NULL) {
214 rc = cl_site_init(site, &vdv->cdv_cl);
215 if (rc == 0)
216 rc = lu_site_init_finish(&site->cs_lu);
217 else {
218 LASSERT(lud->ld_site == NULL);
219 CERROR("Cannot init lu_site, rc %d.\n", rc);
220 OBD_FREE_PTR(site);
221 }
222 } else
223 rc = -ENOMEM;
224 if (rc != 0) {
225 ccc_device_free(env, lud);
226 lud = ERR_PTR(rc);
227 }
 228        return lud;
229}
230
231struct lu_device *ccc_device_free(const struct lu_env *env,
232 struct lu_device *d)
233{
234 struct ccc_device *vdv = lu2ccc_dev(d);
235 struct cl_site *site = lu2cl_site(d->ld_site);
236 struct lu_device *next = cl2lu_dev(vdv->cdv_next);
237
238 if (d->ld_site != NULL) {
239 cl_site_fini(site);
240 OBD_FREE_PTR(site);
241 }
242 cl_device_fini(lu2cl_dev(d));
243 OBD_FREE_PTR(vdv);
244 return next;
245}
246
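/*
 * Allocate a ccc_req slice from ccc_req_kmem and attach it to the
 * transfer request with ccc_req_ops; returns -ENOMEM when the slab
 * allocation fails.
 */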
247int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
248 struct cl_req *req)
249{
250 struct ccc_req *vrq;
251 int result;
252
 253        OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, GFP_NOFS);
254 if (vrq != NULL) {
255 cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
256 result = 0;
257 } else
258 result = -ENOMEM;
259 return result;
260}
261
262/**
 263 * An `emergency' environment used by cl_inode_fini() when cl_env_get()
264 * fails. Access to this environment is serialized by ccc_inode_fini_guard
265 * mutex.
266 */
267static struct lu_env *ccc_inode_fini_env = NULL;
268
269/**
 270 * A mutex serializing calls to cl_inode_fini() under extreme memory
271 * pressure, when environments cannot be allocated.
272 */
273static DEFINE_MUTEX(ccc_inode_fini_guard);
274static int dummy_refcheck;
275
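/*
 * Module-wide initialization: create the ccc slab caches, register the
 * device type, and allocate the emergency environment used by
 * cl_inode_fini() when cl_env_get() fails; everything is unwound on
 * error.
 */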
276int ccc_global_init(struct lu_device_type *device_type)
277{
278 int result;
279
280 result = lu_kmem_init(ccc_caches);
281 if (result)
282 return result;
283
284 result = lu_device_type_init(device_type);
285 if (result)
286 goto out_kmem;
287
288 ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
289 LCT_REMEMBER|LCT_NOREF);
290 if (IS_ERR(ccc_inode_fini_env)) {
291 result = PTR_ERR(ccc_inode_fini_env);
292 goto out_device;
293 }
294
295 ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
296 return 0;
297out_device:
298 lu_device_type_fini(device_type);
299out_kmem:
300 lu_kmem_fini(ccc_caches);
301 return result;
302}
303
304void ccc_global_fini(struct lu_device_type *device_type)
305{
306 if (ccc_inode_fini_env != NULL) {
307 cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
308 ccc_inode_fini_env = NULL;
309 }
310 lu_device_type_fini(device_type);
311 lu_kmem_fini(ccc_caches);
312}
313
314/*****************************************************************************
315 *
316 * Object operations.
317 *
318 */
319
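/*
 * Allocate a ccc_object from ccc_object_kmem, initialize its
 * cl_object_header and make it the top of the lu_object stack, wiring
 * in the cl and lu operation vectors supplied by the caller. Returns
 * NULL if the slab allocation fails.
 */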
320struct lu_object *ccc_object_alloc(const struct lu_env *env,
321 const struct lu_object_header *unused,
322 struct lu_device *dev,
323 const struct cl_object_operations *clops,
324 const struct lu_object_operations *luops)
325{
326 struct ccc_object *vob;
327 struct lu_object *obj;
328
 329        OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, GFP_NOFS);
330 if (vob != NULL) {
331 struct cl_object_header *hdr;
332
333 obj = ccc2lu(vob);
334 hdr = &vob->cob_header;
335 cl_object_header_init(hdr);
336 lu_object_init(obj, &hdr->coh_lu, dev);
337 lu_object_add_top(&hdr->coh_lu, obj);
338
339 vob->cob_cl.co_ops = clops;
340 obj->lo_ops = luops;
341 } else
342 obj = NULL;
343 return obj;
344}
345
346int ccc_object_init0(const struct lu_env *env,
347 struct ccc_object *vob,
348 const struct cl_object_conf *conf)
349{
350 vob->cob_inode = conf->coc_inode;
351 vob->cob_transient_pages = 0;
352 cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
353 return 0;
354}
355
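/*
 * Second stage of object initialization: allocate the object of the
 * next (lower) layer, add it below this one in the stack, and finish
 * ccc-level setup in ccc_object_init0().
 */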
356int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
357 const struct lu_object_conf *conf)
358{
359 struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
360 struct ccc_object *vob = lu2ccc(obj);
361 struct lu_object *below;
362 struct lu_device *under;
363 int result;
364
365 under = &dev->cdv_next->cd_lu_dev;
366 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
367 if (below != NULL) {
368 const struct cl_object_conf *cconf;
369
370 cconf = lu2cl_conf(conf);
371 INIT_LIST_HEAD(&vob->cob_pending_list);
372 lu_object_add(obj, below);
373 result = ccc_object_init0(env, vob, cconf);
374 } else
375 result = -ENOMEM;
376 return result;
377}
378
379void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
380{
381 struct ccc_object *vob = lu2ccc(obj);
382
383 lu_object_fini(obj);
384 lu_object_header_fini(obj->lo_header);
385 OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
386}
387
388int ccc_lock_init(const struct lu_env *env,
389 struct cl_object *obj, struct cl_lock *lock,
390 const struct cl_io *unused,
391 const struct cl_lock_operations *lkops)
392{
393 struct ccc_lock *clk;
394 int result;
395
396 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
397
 398        OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, GFP_NOFS);
399 if (clk != NULL) {
400 cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
401 result = 0;
402 } else
403 result = -ENOMEM;
404 return result;
405}
406
407int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
408 const struct cl_attr *attr, unsigned valid)
409{
410 return 0;
411}
412
413int ccc_object_glimpse(const struct lu_env *env,
414 const struct cl_object *obj, struct ost_lvb *lvb)
415{
416 struct inode *inode = ccc_object_inode(obj);
417
418 lvb->lvb_mtime = cl_inode_mtime(inode);
419 lvb->lvb_atime = cl_inode_atime(inode);
420 lvb->lvb_ctime = cl_inode_ctime(inode);
421 /*
 422         * LU-417: Add dirty pages block count lest i_blocks reports 0; otherwise
 423         * "cp" or "tar" on a remote node may think it's a completely sparse file
424 * and skip it.
425 */
426 if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
427 lvb->lvb_blocks = dirty_cnt(inode);
 428        return 0;
429}
430
431
432
433int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
434 const struct cl_object_conf *conf)
435{
436 /* TODO: destroy all pages attached to this object. */
437 return 0;
438}
439
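/*
 * Helpers taking and releasing both the inode size lock and the
 * cl_object attribute lock, always in that order, so that i_size and
 * the cached cl_attr stay consistent with each other.
 */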
440static void ccc_object_size_lock(struct cl_object *obj)
441{
442 struct inode *inode = ccc_object_inode(obj);
443
444 cl_isize_lock(inode);
445 cl_object_attr_lock(obj);
446}
447
448static void ccc_object_size_unlock(struct cl_object *obj)
449{
450 struct inode *inode = ccc_object_inode(obj);
451
452 cl_object_attr_unlock(obj);
453 cl_isize_unlock(inode);
454}
455
456/*****************************************************************************
457 *
458 * Page operations.
459 *
460 */
461
462struct page *ccc_page_vmpage(const struct lu_env *env,
463 const struct cl_page_slice *slice)
464{
465 return cl2vm_page(slice);
466}
467
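/*
 * For read, write and fault io, report -EBUSY when the file descriptor
 * holds a group lock or when a CLM_READ descriptor for the page's index
 * matches a lock in the io's set of granted locks; all other io types
 * report 0.
 */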
468int ccc_page_is_under_lock(const struct lu_env *env,
469 const struct cl_page_slice *slice,
470 struct cl_io *io)
471{
472 struct ccc_io *cio = ccc_env_io(env);
473 struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
474 struct cl_page *page = slice->cpl_page;
475
476 int result;
477
478 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
479 io->ci_type == CIT_FAULT) {
480 if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
481 result = -EBUSY;
482 else {
483 desc->cld_start = page->cp_index;
484 desc->cld_end = page->cp_index;
485 desc->cld_obj = page->cp_obj;
486 desc->cld_mode = CLM_READ;
487 result = cl_queue_match(&io->ci_lockset.cls_done,
488 desc) ? -EBUSY : 0;
489 }
490 } else
491 result = 0;
 492        return result;
493}
494
495int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
496{
497 /*
498 * Cached read?
499 */
500 LBUG();
501 return 0;
502}
503
504void ccc_transient_page_verify(const struct cl_page *page)
505{
506}
507
508int ccc_transient_page_own(const struct lu_env *env,
509 const struct cl_page_slice *slice,
510 struct cl_io *unused,
511 int nonblock)
512{
513 ccc_transient_page_verify(slice->cpl_page);
514 return 0;
515}
516
517void ccc_transient_page_assume(const struct lu_env *env,
518 const struct cl_page_slice *slice,
519 struct cl_io *unused)
520{
521 ccc_transient_page_verify(slice->cpl_page);
522}
523
524void ccc_transient_page_unassume(const struct lu_env *env,
525 const struct cl_page_slice *slice,
526 struct cl_io *unused)
527{
528 ccc_transient_page_verify(slice->cpl_page);
529}
530
531void ccc_transient_page_disown(const struct lu_env *env,
532 const struct cl_page_slice *slice,
533 struct cl_io *unused)
534{
535 ccc_transient_page_verify(slice->cpl_page);
536}
537
538void ccc_transient_page_discard(const struct lu_env *env,
539 const struct cl_page_slice *slice,
540 struct cl_io *unused)
541{
542 struct cl_page *page = slice->cpl_page;
543
544 ccc_transient_page_verify(slice->cpl_page);
545
546 /*
 547         * For a transient page, remove it from the radix tree.
548 */
549 cl_page_delete(env, page);
550}
551
552int ccc_transient_page_prep(const struct lu_env *env,
553 const struct cl_page_slice *slice,
554 struct cl_io *unused)
555{
 556        /* transient page should always be sent. */
 557        return 0;
558}
559
560/*****************************************************************************
561 *
562 * Lock operations.
563 *
564 */
565
566void ccc_lock_delete(const struct lu_env *env,
567 const struct cl_lock_slice *slice)
568{
569 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
570}
571
572void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
573{
574 struct ccc_lock *clk = cl2ccc_lock(slice);
575 OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
576}
577
578int ccc_lock_enqueue(const struct lu_env *env,
579 const struct cl_lock_slice *slice,
580 struct cl_io *unused, __u32 enqflags)
581{
582 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
583 return 0;
584}
585
586int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
587{
588 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
589 return 0;
590}
591
592int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
593{
594 CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
595 return 0;
596}
597
598/**
599 * Implementation of cl_lock_operations::clo_fits_into() methods for ccc
600 * layer. This function is executed every time io finds an existing lock in
601 * the lock cache while creating new lock. This function has to decide whether
602 * cached lock "fits" into io.
603 *
604 * \param slice lock to be checked
605 * \param io IO that wants a lock.
606 *
607 * \see lov_lock_fits_into().
608 */
609int ccc_lock_fits_into(const struct lu_env *env,
610 const struct cl_lock_slice *slice,
611 const struct cl_lock_descr *need,
612 const struct cl_io *io)
613{
614 const struct cl_lock *lock = slice->cls_lock;
615 const struct cl_lock_descr *descr = &lock->cll_descr;
616 const struct ccc_io *cio = ccc_env_io(env);
617 int result;
618
619 /*
620 * Work around DLM peculiarity: it assumes that glimpse
 621         * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read lock
622 * when asked for LCK_PW lock with LDLM_FL_HAS_INTENT flag set. Make
623 * sure that glimpse doesn't get CLM_WRITE top-lock, so that it
624 * doesn't enqueue CLM_WRITE sub-locks.
625 */
626 if (cio->cui_glimpse)
627 result = descr->cld_mode != CLM_WRITE;
628
629 /*
630 * Also, don't match incomplete write locks for read, otherwise read
631 * would enqueue missing sub-locks in the write mode.
632 */
633 else if (need->cld_mode != descr->cld_mode)
634 result = lock->cll_state >= CLS_ENQUEUED;
635 else
636 result = 1;
 637        return result;
638}
639
640/**
641 * Implements cl_lock_operations::clo_state() method for ccc layer, invoked
642 * whenever lock state changes. Transfers object attributes, that might be
643 * updated as a result of lock acquiring into inode.
644 */
645void ccc_lock_state(const struct lu_env *env,
646 const struct cl_lock_slice *slice,
647 enum cl_lock_state state)
648{
649 struct cl_lock *lock = slice->cls_lock;
650
651 /*
652 * Refresh inode attributes when the lock is moving into CLS_HELD
653 * state, and only when this is a result of real enqueue, rather than
654 * of finding lock in the cache.
655 */
656 if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
657 struct cl_object *obj;
658 struct inode *inode;
659
660 obj = slice->cls_obj;
661 inode = ccc_object_inode(obj);
662
663 /* vmtruncate() sets the i_size
664 * under both a DLM lock and the
665 * ll_inode_size_lock(). If we don't get the
666 * ll_inode_size_lock() here we can match the DLM lock and
667 * reset i_size. generic_file_write can then trust the
668 * stale i_size when doing appending writes and effectively
669 * cancel the result of the truncate. Getting the
670 * ll_inode_size_lock() after the enqueue maintains the DLM
671 * -> ll_inode_size_lock() acquiring order. */
672 if (lock->cll_descr.cld_start == 0 &&
673 lock->cll_descr.cld_end == CL_PAGE_EOF)
674 cl_merge_lvb(env, inode);
675 }
676}
677
678/*****************************************************************************
679 *
680 * io operations.
681 *
682 */
683
684void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
685{
686 struct cl_io *io = ios->cis_io;
687
688 CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
689}
690
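/*
 * Build a lock descriptor for the page range [start, end] of the io's
 * object and add it to the io lock-set. Group-locked files request
 * CLM_GROUP with the group id taken from the file descriptor;
 * everything else uses the mode passed by the caller.
 */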
691int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
692 __u32 enqflags, enum cl_lock_mode mode,
693 pgoff_t start, pgoff_t end)
694{
695 struct ccc_io *cio = ccc_env_io(env);
696 struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
697 struct cl_object *obj = io->ci_obj;
698
699 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
700
701 CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
702
 703        memset(&cio->cui_link, 0, sizeof(cio->cui_link));
704
705 if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
706 descr->cld_mode = CLM_GROUP;
707 descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
708 } else {
709 descr->cld_mode = mode;
710 }
711 descr->cld_obj = obj;
712 descr->cld_start = start;
713 descr->cld_end = end;
714 descr->cld_enq_flags = enqflags;
715
716 cl_io_lock_add(env, io, &cio->cui_link);
 717        return 0;
718}
719
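/*
 * Trim the iovec array so that it covers exactly crw_count bytes:
 * segments are counted until the requested size is reached and the
 * last one is shortened if needed, with its original length saved in
 * cui_iov_olen so that ccc_io_advance() can restore it. For example,
 * with three 4k segments and crw_count of 6k, cui_nrsegs becomes 2 and
 * the second segment is cut down to 2k.
 */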
720void ccc_io_update_iov(const struct lu_env *env,
721 struct ccc_io *cio, struct cl_io *io)
722{
723 int i;
724 size_t size = io->u.ci_rw.crw_count;
725
726 cio->cui_iov_olen = 0;
727 if (!cl_is_normalio(env, io) || cio->cui_tot_nrsegs == 0)
728 return;
729
730 for (i = 0; i < cio->cui_tot_nrsegs; i++) {
731 struct iovec *iv = &cio->cui_iov[i];
732
733 if (iv->iov_len < size)
734 size -= iv->iov_len;
735 else {
736 if (iv->iov_len > size) {
737 cio->cui_iov_olen = iv->iov_len;
738 iv->iov_len = size;
739 }
740 break;
741 }
742 }
743
744 cio->cui_nrsegs = i + 1;
745 LASSERTF(cio->cui_tot_nrsegs >= cio->cui_nrsegs,
746 "tot_nrsegs: %lu, nrsegs: %lu\n",
747 cio->cui_tot_nrsegs, cio->cui_nrsegs);
748}
749
750int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
751 __u32 enqflags, enum cl_lock_mode mode,
752 loff_t start, loff_t end)
753{
754 struct cl_object *obj = io->ci_obj;
755 return ccc_io_one_lock_index(env, io, enqflags, mode,
756 cl_index(obj, start), cl_index(obj, end));
757}
758
759void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
760{
761 CLOBINVRNT(env, ios->cis_io->ci_obj,
762 ccc_object_invariant(ios->cis_io->ci_obj));
763}
764
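/*
 * Advance the iovec cursor after one iteration of the io loop: skip
 * the cui_nrsegs segments just consumed, decrease the remaining byte
 * count, and, if the last segment had been shortened by
 * ccc_io_update_iov(), restore it -- continuing from the consumed part
 * when the io continues, or to its full original length on restart.
 */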
765void ccc_io_advance(const struct lu_env *env,
766 const struct cl_io_slice *ios,
767 size_t nob)
768{
769 struct ccc_io *cio = cl2ccc_io(env, ios);
770 struct cl_io *io = ios->cis_io;
771 struct cl_object *obj = ios->cis_io->ci_obj;
772
773 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
774
775 if (!cl_is_normalio(env, io))
776 return;
777
778 LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
779 LASSERT(cio->cui_tot_count >= nob);
780
781 cio->cui_iov += cio->cui_nrsegs;
782 cio->cui_tot_nrsegs -= cio->cui_nrsegs;
783 cio->cui_tot_count -= nob;
784
785 /* update the iov */
786 if (cio->cui_iov_olen > 0) {
787 struct iovec *iv;
788
789 cio->cui_iov--;
790 cio->cui_tot_nrsegs++;
791 iv = &cio->cui_iov[0];
792 if (io->ci_continue) {
793 iv->iov_base += iv->iov_len;
794 LASSERT(cio->cui_iov_olen > iv->iov_len);
795 iv->iov_len = cio->cui_iov_olen - iv->iov_len;
796 } else {
797 /* restore the iov_len, in case of restart io. */
798 iv->iov_len = cio->cui_iov_olen;
799 }
800 cio->cui_iov_olen = 0;
801 }
802}
803
804/**
 805 * Helper function that, if necessary, adjusts the file size (inode->i_size)
 806 * when the position at offset \a pos is accessed. The file size can be
 807 * arbitrarily stale on a Lustre client, but the client at least knows the
 808 * KMS. If the accessed area is inside [0, KMS], set the file size to KMS; otherwise glimpse the file size.
809 *
810 * Locking: cl_isize_lock is used to serialize changes to inode size and to
811 * protect consistency between inode size and cl_object
812 * attributes. cl_object_size_lock() protects consistency between cl_attr's of
813 * top-object and sub-objects.
814 */
815int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
816 struct cl_io *io, loff_t start, size_t count, int *exceed)
817{
818 struct cl_attr *attr = ccc_env_thread_attr(env);
819 struct inode *inode = ccc_object_inode(obj);
820 loff_t pos = start + count - 1;
821 loff_t kms;
822 int result;
823
824 /*
825 * Consistency guarantees: following possibilities exist for the
826 * relation between region being accessed and real file size at this
827 * moment:
828 *
829 * (A): the region is completely inside of the file;
830 *
831 * (B-x): x bytes of region are inside of the file, the rest is
832 * outside;
833 *
834 * (C): the region is completely outside of the file.
835 *
836 * This classification is stable under DLM lock already acquired by
837 * the caller, because to change the class, other client has to take
838 * DLM lock conflicting with our lock. Also, any updates to ->i_size
839 * by other threads on this client are serialized by
840 * ll_inode_size_lock(). This guarantees that short reads are handled
841 * correctly in the face of concurrent writes and truncates.
842 */
843 ccc_object_size_lock(obj);
844 result = cl_object_attr_get(env, obj, attr);
845 if (result == 0) {
846 kms = attr->cat_kms;
847 if (pos > kms) {
848 /*
849 * A glimpse is necessary to determine whether we
850 * return a short read (B) or some zeroes at the end
851 * of the buffer (C)
852 */
853 ccc_object_size_unlock(obj);
854 result = cl_glimpse_lock(env, io, inode, obj, 0);
855 if (result == 0 && exceed != NULL) {
 856                        /* If the object's page index exceeds the end-of-file
 857                         * page index, return directly. Do not expect the
 858                         * kernel to check such a case correctly;
 859                         * linux-2.6.18-128.1.1 misses doing that.
 860                         * --bug 17336 */
861 loff_t size = cl_isize_read(inode);
862 unsigned long cur_index = start >> PAGE_CACHE_SHIFT;
863
864 if ((size == 0 && cur_index != 0) ||
865 (((size - 1) >> PAGE_CACHE_SHIFT) < cur_index))
866 *exceed = 1;
867 }
868 return result;
869 } else {
870 /*
871 * region is within kms and, hence, within real file
872 * size (A). We need to increase i_size to cover the
873 * read region so that generic_file_read() will do its
874 * job, but that doesn't mean the kms size is
875 * _correct_, it is only the _minimum_ size. If
876 * someone does a stat they will get the correct size
877 * which will always be >= the kms value here.
878 * b=11081
879 */
880 if (cl_isize_read(inode) < kms) {
881 cl_isize_write_nolock(inode, kms);
882 CDEBUG(D_VFSTRACE,
883 DFID" updating i_size "LPU64"\n",
884 PFID(lu_object_fid(&obj->co_lu)),
885 (__u64)cl_isize_read(inode));
886
887 }
888 }
889 }
890 ccc_object_size_unlock(obj);
891 return result;
892}
893
894/*****************************************************************************
895 *
896 * Transfer operations.
897 *
898 */
899
900void ccc_req_completion(const struct lu_env *env,
901 const struct cl_req_slice *slice, int ioret)
902{
903 struct ccc_req *vrq;
904
905 if (ioret > 0)
906 cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
907
908 vrq = cl2ccc_req(slice);
909 OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
910}
911
912/**
913 * Implementation of struct cl_req_operations::cro_attr_set() for ccc
914 * layer. ccc is responsible for
915 *
916 * - o_[mac]time
917 *
918 * - o_mode
919 *
920 * - o_parent_seq
921 *
922 * - o_[ug]id
923 *
924 * - o_parent_oid
925 *
926 * - o_parent_ver
927 *
928 * - o_ioepoch,
929 *
930 * and capability.
931 */
932void ccc_req_attr_set(const struct lu_env *env,
933 const struct cl_req_slice *slice,
934 const struct cl_object *obj,
935 struct cl_req_attr *attr, obd_valid flags)
936{
937 struct inode *inode;
938 struct obdo *oa;
939 obd_flag valid_flags;
940
941 oa = attr->cra_oa;
942 inode = ccc_object_inode(obj);
943 valid_flags = OBD_MD_FLTYPE;
944
945 if ((flags & OBD_MD_FLOSSCAPA) != 0) {
946 LASSERT(attr->cra_capa == NULL);
947 attr->cra_capa = cl_capa_lookup(inode,
948 slice->crs_req->crq_type);
949 }
950
951 if (slice->crs_req->crq_type == CRT_WRITE) {
952 if (flags & OBD_MD_FLEPOCH) {
953 oa->o_valid |= OBD_MD_FLEPOCH;
954 oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
955 valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
956 OBD_MD_FLUID | OBD_MD_FLGID;
957 }
958 }
959 obdo_from_inode(oa, inode, valid_flags & flags);
960 obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
961 memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
962 JOBSTATS_JOBID_SIZE);
963}
964
965const struct cl_req_operations ccc_req_ops = {
966 .cro_attr_set = ccc_req_attr_set,
967 .cro_completion = ccc_req_completion
968};
969
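/*
 * Propagate attribute changes (size and timestamps from the iattr) to
 * the object by running a CIT_SETATTR io against lli_clob, restarting
 * the io when ci_need_restart is set and tolerating the HSM released
 * file case noted below.
 */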
970int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
971 struct obd_capa *capa)
972{
973 struct lu_env *env;
974 struct cl_io *io;
975 int result;
976 int refcheck;
977
978 env = cl_env_get(&refcheck);
979 if (IS_ERR(env))
 980                return PTR_ERR(env);
981
982 io = ccc_env_thread_io(env);
983 io->ci_obj = cl_i2info(inode)->lli_clob;
984
985 io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
986 io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
987 io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
988 io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
989 io->u.ci_setattr.sa_valid = attr->ia_valid;
990 io->u.ci_setattr.sa_capa = capa;
991
992again:
993 if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
994 struct ccc_io *cio = ccc_env_io(env);
995
996 if (attr->ia_valid & ATTR_FILE)
997 /* populate the file descriptor for ftruncate to honor
998 * group lock - see LU-787 */
999 cio->cui_fd = cl_iattr2fd(inode, attr);
1000
1001 result = cl_io_loop(env, io);
1002 } else {
1003 result = io->ci_result;
1004 }
1005 cl_io_fini(env, io);
1006 if (unlikely(io->ci_need_restart))
1007 goto again;
 1008        /* HSM import case: the file is released and cannot be restored;
 1009         * there is no need to fail unless restore registration failed
 1010         * with -ENODATA */
1011 if (result == -ENODATA && io->ci_restore_needed &&
1012 io->ci_result != -ENODATA)
1013 result = 0;
 1014        cl_env_put(env, &refcheck);
 1015        return result;
1016}
1017
1018/*****************************************************************************
1019 *
1020 * Type conversions.
1021 *
1022 */
1023
1024struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
1025{
1026 return &vdv->cdv_cl.cd_lu_dev;
1027}
1028
1029struct ccc_device *lu2ccc_dev(const struct lu_device *d)
1030{
1031 return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
1032}
1033
1034struct ccc_device *cl2ccc_dev(const struct cl_device *d)
1035{
1036 return container_of0(d, struct ccc_device, cdv_cl);
1037}
1038
1039struct lu_object *ccc2lu(struct ccc_object *vob)
1040{
1041 return &vob->cob_cl.co_lu;
1042}
1043
1044struct ccc_object *lu2ccc(const struct lu_object *obj)
1045{
1046 return container_of0(obj, struct ccc_object, cob_cl.co_lu);
1047}
1048
1049struct ccc_object *cl2ccc(const struct cl_object *obj)
1050{
1051 return container_of0(obj, struct ccc_object, cob_cl);
1052}
1053
1054struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
1055{
1056 return container_of(slice, struct ccc_lock, clk_cl);
1057}
1058
1059struct ccc_io *cl2ccc_io(const struct lu_env *env,
1060 const struct cl_io_slice *slice)
1061{
1062 struct ccc_io *cio;
1063
1064 cio = container_of(slice, struct ccc_io, cui_cl);
1065 LASSERT(cio == ccc_env_io(env));
1066 return cio;
1067}
1068
1069struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
1070{
1071 return container_of0(slice, struct ccc_req, crq_cl);
1072}
1073
1074struct page *cl2vm_page(const struct cl_page_slice *slice)
1075{
1076 return cl2ccc_page(slice)->cpg_page;
1077}
1078
1079/*****************************************************************************
1080 *
1081 * Accessors.
1082 *
1083 */
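/*
 * Sanity check tying the cl_object to its inode: the inode must be a
 * regular file (or an unlinked inode with zeroed i_mode) and its
 * lli_clob must point back at this object.
 */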
1084int ccc_object_invariant(const struct cl_object *obj)
1085{
1086 struct inode *inode = ccc_object_inode(obj);
1087 struct cl_inode_info *lli = cl_i2info(inode);
1088
1089 return (S_ISREG(cl_inode_mode(inode)) ||
1090 /* i_mode of unlinked inode is zeroed. */
1091 cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
1092}
1093
1094struct inode *ccc_object_inode(const struct cl_object *obj)
1095{
1096 return cl2ccc(obj)->cob_inode;
1097}
1098
1099/**
1100 * Returns a pointer to cl_page associated with \a vmpage, without acquiring
1101 * additional reference to the resulting page. This is an unsafe version of
1102 * cl_vmpage_page() that can only be used under vmpage lock.
1103 */
1104struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
1105{
1106 KLASSERT(PageLocked(vmpage));
1107 return (struct cl_page *)vmpage->private;
1108}
1109
1110/**
1111 * Initialize or update CLIO structures for regular files when new
1112 * meta-data arrives from the server.
1113 *
1114 * \param inode regular file inode
1115 * \param md new file metadata from MDS
1116 * - allocates cl_object if necessary,
1117 * - updated layout, if object was already here.
1118 */
1119int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
1120{
1121 struct lu_env *env;
1122 struct cl_inode_info *lli;
1123 struct cl_object *clob;
1124 struct lu_site *site;
1125 struct lu_fid *fid;
1126 struct cl_object_conf conf = {
1127 .coc_inode = inode,
1128 .u = {
1129 .coc_md = md
1130 }
1131 };
1132 int result = 0;
1133 int refcheck;
1134
1135 LASSERT(md->body->valid & OBD_MD_FLID);
1136 LASSERT(S_ISREG(cl_inode_mode(inode)));
1137
1138 env = cl_env_get(&refcheck);
1139 if (IS_ERR(env))
1140 return PTR_ERR(env);
1141
1142 site = cl_i2sbi(inode)->ll_site;
1143 lli = cl_i2info(inode);
1144 fid = &lli->lli_fid;
1145 LASSERT(fid_is_sane(fid));
1146
1147 if (lli->lli_clob == NULL) {
 1148                /* clob is a slave of the inode; an empty lli_clob for a new inode
 1149                 * means there is no clob in the cache with the given fid, so it is
 1150                 * unnecessary to perform lookup-alloc-lookup-insert, just
 1151                 * alloc and insert directly. */
1152 LASSERT(inode->i_state & I_NEW);
1153 conf.coc_lu.loc_flags = LOC_F_NEW;
1154 clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
1155 fid, &conf);
1156 if (!IS_ERR(clob)) {
1157 /*
1158 * No locking is necessary, as new inode is
1159 * locked by I_NEW bit.
1160 */
1161 lli->lli_clob = clob;
 1162                        lli->lli_has_smd = lsm_has_objects(md->lsm);
1163 lu_object_ref_add(&clob->co_lu, "inode", inode);
1164 } else
1165 result = PTR_ERR(clob);
1166 } else {
1167 result = cl_conf_set(env, lli->lli_clob, &conf);
1168 }
1169
1170 cl_env_put(env, &refcheck);
1171
1172 if (result != 0)
1173 CERROR("Failure to initialize cl object "DFID": %d\n",
1174 PFID(fid), result);
1175 return result;
1176}
1177
1178/**
1179 * Wait for others drop their references of the object at first, then we drop
1180 * the last one, which will lead to the object be destroyed immediately.
1181 * Must be called after cl_object_kill() against this object.
1182 *
1183 * The reason we want to do this is: destroying top object will wait for sub
1184 * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs)
1185 * to initiate top object destroying which may deadlock. See bz22520.
1186 */
1187static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
1188{
1189 struct lu_object_header *header = obj->co_lu.lo_header;
1190 wait_queue_t waiter;
1191
1192 if (unlikely(atomic_read(&header->loh_ref) != 1)) {
1193 struct lu_site *site = obj->co_lu.lo_dev->ld_site;
1194 struct lu_site_bkt_data *bkt;
1195
1196 bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
1197
 1198                init_waitqueue_entry(&waiter, current);
1199 add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
1200
1201 while (1) {
1202 set_current_state(TASK_UNINTERRUPTIBLE);
1203 if (atomic_read(&header->loh_ref) == 1)
1204 break;
 1205                        schedule();
1206 }
1207
1208 set_current_state(TASK_RUNNING);
1209 remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
1210 }
1211
1212 cl_object_put(env, obj);
1213}
1214
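/*
 * Detach and destroy the cl_object of an inode that is being evicted.
 * When no cl environment can be allocated, fall back to the statically
 * allocated ccc_inode_fini_env, serialized by ccc_inode_fini_guard.
 */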
1215void cl_inode_fini(struct inode *inode)
1216{
1217 struct lu_env *env;
1218 struct cl_inode_info *lli = cl_i2info(inode);
1219 struct cl_object *clob = lli->lli_clob;
1220 int refcheck;
1221 int emergency;
1222
1223 if (clob != NULL) {
1224 void *cookie;
1225
1226 cookie = cl_env_reenter();
1227 env = cl_env_get(&refcheck);
1228 emergency = IS_ERR(env);
1229 if (emergency) {
1230 mutex_lock(&ccc_inode_fini_guard);
1231 LASSERT(ccc_inode_fini_env != NULL);
1232 cl_env_implant(ccc_inode_fini_env, &refcheck);
1233 env = ccc_inode_fini_env;
1234 }
1235 /*
1236 * cl_object cache is a slave to inode cache (which, in turn
1237 * is a slave to dentry cache), don't keep cl_object in memory
1238 * when its master is evicted.
1239 */
1240 cl_object_kill(env, clob);
1241 lu_object_ref_del(&clob->co_lu, "inode", inode);
1242 cl_object_put_last(env, clob);
1243 lli->lli_clob = NULL;
1244 if (emergency) {
1245 cl_env_unplant(ccc_inode_fini_env, &refcheck);
1246 mutex_unlock(&ccc_inode_fini_guard);
1247 } else
1248 cl_env_put(env, &refcheck);
1249 cl_env_reexit(cookie);
1250 }
1251}
1252
1253/**
1254 * return IF_* type for given lu_dirent entry.
 1255 * IF_* flag should be converted to a particular OS file type in
1256 * platform llite module.
1257 */
1258__u16 ll_dirent_type_get(struct lu_dirent *ent)
1259{
1260 __u16 type = 0;
1261 struct luda_type *lt;
1262 int len = 0;
1263
1264 if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
1265 const unsigned align = sizeof(struct luda_type) - 1;
1266
1267 len = le16_to_cpu(ent->lde_namelen);
1268 len = (len + align) & ~align;
1269 lt = (void *)ent->lde_name + len;
1270 type = IFTODT(le16_to_cpu(lt->lt_type));
1271 }
1272 return type;
1273}
1274
1275/**
1276 * build inode number from passed @fid */
1277__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
1278{
1279 if (BITS_PER_LONG == 32 || api32)
 1280                return fid_flatten32(fid);
 1281        else
 1282                return fid_flatten(fid);
1283}
1284
1285/**
1286 * build inode generation from passed @fid. If our FID overflows the 32-bit
1287 * inode number then return a non-zero generation to distinguish them. */
1288__u32 cl_fid_build_gen(const struct lu_fid *fid)
1289{
1290 __u32 gen;
1291
1292 if (fid_is_igif(fid)) {
1293 gen = lu_igif_gen(fid);
 1294                return gen;
1295 }
1296
1297 gen = (fid_flatten(fid) >> 32);
 1298        return gen;
1299}
1300
1301/* lsm is unreliable after hsm implementation as layout can be changed at
1302 * any time. This is only to support old, non-clio-ized interfaces. It will
1303 * cause deadlock if clio operations are called with this extra layout refcount
1304 * because in case the layout changed during the IO, ll_layout_refresh() will
1305 * have to wait for the refcount to become zero to destroy the older layout.
1306 *
1307 * Notice that the lsm returned by this function may not be valid unless called
1308 * inside layout lock - MDS_INODELOCK_LAYOUT. */
1309struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
1310{
1311 return lov_lsm_get(cl_i2info(inode)->lli_clob);
1312}
1313
 1314inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
1315{
1316 lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
1317}