/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl code shared between vvp and liblustre (and other Lustre clients in the
 * future).
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>

#include "../include/obd.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "../include/lustre_mdc.h"
#include "../include/cl_object.h"

#include "../include/lclient.h"

#include "../llite/llite_internal.h"

static const struct cl_req_operations ccc_req_ops;

/*
 * ccc_ prefix stands for "Common Client Code".
 */

static struct kmem_cache *ccc_lock_kmem;
static struct kmem_cache *ccc_object_kmem;
static struct kmem_cache *ccc_thread_kmem;
static struct kmem_cache *ccc_session_kmem;
static struct kmem_cache *ccc_req_kmem;

static struct lu_kmem_descr ccc_caches[] = {
	{
		.ckd_cache = &ccc_lock_kmem,
		.ckd_name  = "ccc_lock_kmem",
		.ckd_size  = sizeof(struct ccc_lock)
	},
	{
		.ckd_cache = &ccc_object_kmem,
		.ckd_name  = "ccc_object_kmem",
		.ckd_size  = sizeof(struct ccc_object)
	},
	{
		.ckd_cache = &ccc_thread_kmem,
		.ckd_name  = "ccc_thread_kmem",
		.ckd_size  = sizeof(struct ccc_thread_info),
	},
	{
		.ckd_cache = &ccc_session_kmem,
		.ckd_name  = "ccc_session_kmem",
		.ckd_size  = sizeof(struct ccc_session)
	},
	{
		.ckd_cache = &ccc_req_kmem,
		.ckd_name  = "ccc_req_kmem",
		.ckd_size  = sizeof(struct ccc_req)
	},
	{
		.ckd_cache = NULL
	}
};

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
{
	struct ccc_thread_info *info;

	info = kmem_cache_alloc(ccc_thread_kmem, GFP_NOFS | __GFP_ZERO);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}

void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data)
{
	struct ccc_thread_info *info = data;

	kmem_cache_free(ccc_thread_kmem, info);
}

void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key)
{
	struct ccc_session *session;

	session = kmem_cache_alloc(ccc_session_kmem, GFP_NOFS | __GFP_ZERO);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}

void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data)
{
	struct ccc_session *session = data;

	kmem_cache_free(ccc_session_kmem, session);
}

struct lu_context_key ccc_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = ccc_key_init,
	.lct_fini = ccc_key_fini
};

struct lu_context_key ccc_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = ccc_session_key_init,
	.lct_fini = ccc_session_key_fini
};

/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */

int ccc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next)
{
	struct ccc_device *vdv;
	int rc;

	vdv = lu2ccc_dev(d);
	vdv->cdv_next = lu2cl_dev(next);

	LASSERT(d->ld_site != NULL && next->ld_type != NULL);
	next->ld_site = d->ld_site;
	rc = next->ld_type->ldt_ops->ldto_device_init(
			env, next, next->ld_type->ldt_name, NULL);
	if (rc == 0) {
		lu_device_get(next);
		lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
	}
	return rc;
}

struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d)
{
	return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
}

struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops)
{
	struct ccc_device *vdv;
	struct lu_device *lud;
	struct cl_site *site;
	int rc;

	vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
	if (!vdv)
		return ERR_PTR(-ENOMEM);

	lud = &vdv->cdv_cl.cd_lu_dev;
	cl_device_init(&vdv->cdv_cl, t);
	ccc2lu_dev(vdv)->ld_ops = luops;
	vdv->cdv_cl.cd_ops = clops;

	site = kzalloc(sizeof(*site), GFP_NOFS);
	if (site != NULL) {
		rc = cl_site_init(site, &vdv->cdv_cl);
		if (rc == 0)
			rc = lu_site_init_finish(&site->cs_lu);
		else {
			LASSERT(lud->ld_site == NULL);
			CERROR("Cannot init lu_site, rc %d.\n", rc);
			kfree(site);
		}
	} else
		rc = -ENOMEM;
	if (rc != 0) {
		ccc_device_free(env, lud);
		lud = ERR_PTR(rc);
	}
	return lud;
}

struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d)
{
	struct ccc_device *vdv = lu2ccc_dev(d);
	struct cl_site *site = lu2cl_site(d->ld_site);
	struct lu_device *next = cl2lu_dev(vdv->cdv_next);

	if (d->ld_site != NULL) {
		cl_site_fini(site);
		kfree(site);
	}
	cl_device_fini(lu2cl_dev(d));
	kfree(vdv);
	return next;
}

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req)
{
	struct ccc_req *vrq;
	int result;

	vrq = kmem_cache_alloc(ccc_req_kmem, GFP_NOFS | __GFP_ZERO);
	if (vrq != NULL) {
		cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
		result = 0;
	} else
		result = -ENOMEM;
	return result;
}

/**
 * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
 * fails. Access to this environment is serialized by the
 * ccc_inode_fini_guard mutex.
 */
static struct lu_env *ccc_inode_fini_env;

/**
 * A mutex serializing calls to slp_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;

int ccc_global_init(struct lu_device_type *device_type)
{
	int result;

	result = lu_kmem_init(ccc_caches);
	if (result)
		return result;

	result = lu_device_type_init(device_type);
	if (result)
		goto out_kmem;

	ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
					  LCT_REMEMBER | LCT_NOREF);
	if (IS_ERR(ccc_inode_fini_env)) {
		result = PTR_ERR(ccc_inode_fini_env);
		goto out_device;
	}

	ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
	return 0;
out_device:
	lu_device_type_fini(device_type);
out_kmem:
	lu_kmem_fini(ccc_caches);
	return result;
}

void ccc_global_fini(struct lu_device_type *device_type)
{
	if (ccc_inode_fini_env != NULL) {
		cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
		ccc_inode_fini_env = NULL;
	}
	lu_device_type_fini(device_type);
	lu_kmem_fini(ccc_caches);
}

/*****************************************************************************
 *
 * Object operations.
 *
 */

struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *unused,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops)
{
	struct ccc_object *vob;
	struct lu_object *obj;

	vob = kmem_cache_alloc(ccc_object_kmem, GFP_NOFS | __GFP_ZERO);
	if (vob != NULL) {
		struct cl_object_header *hdr;

		obj = ccc2lu(vob);
		hdr = &vob->cob_header;
		cl_object_header_init(hdr);
		lu_object_init(obj, &hdr->coh_lu, dev);
		lu_object_add_top(&hdr->coh_lu, obj);

		vob->cob_cl.co_ops = clops;
		obj->lo_ops = luops;
	} else
		obj = NULL;
	return obj;
}

int ccc_object_init0(const struct lu_env *env,
		     struct ccc_object *vob,
		     const struct cl_object_conf *conf)
{
	vob->cob_inode = conf->coc_inode;
	vob->cob_transient_pages = 0;
	cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
	return 0;
}

int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf)
{
	struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
	struct ccc_object *vob = lu2ccc(obj);
	struct lu_object *below;
	struct lu_device *under;
	int result;

	under = &dev->cdv_next->cd_lu_dev;
	below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
	if (below != NULL) {
		const struct cl_object_conf *cconf;

		cconf = lu2cl_conf(conf);
		INIT_LIST_HEAD(&vob->cob_pending_list);
		lu_object_add(obj, below);
		result = ccc_object_init0(env, vob, cconf);
	} else
		result = -ENOMEM;
	return result;
}

void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
{
	struct ccc_object *vob = lu2ccc(obj);

	lu_object_fini(obj);
	lu_object_header_fini(obj->lo_header);
	kmem_cache_free(ccc_object_kmem, vob);
}

int ccc_lock_init(const struct lu_env *env,
		  struct cl_object *obj, struct cl_lock *lock,
		  const struct cl_io *unused,
		  const struct cl_lock_operations *lkops)
{
	struct ccc_lock *clk;
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	clk = kmem_cache_alloc(ccc_lock_kmem, GFP_NOFS | __GFP_ZERO);
	if (clk != NULL) {
		cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
		result = 0;
	} else
		result = -ENOMEM;
	return result;
}

int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb)
{
	struct inode *inode = ccc_object_inode(obj);

	lvb->lvb_mtime = cl_inode_mtime(inode);
	lvb->lvb_atime = cl_inode_atime(inode);
	lvb->lvb_ctime = cl_inode_ctime(inode);
	/*
	 * LU-417: Add dirty pages block count lest i_blocks reports 0, some
	 * "cp" or "tar" on remote node may think it's a completely sparse
	 * file and skip it.
	 */
	if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
		lvb->lvb_blocks = dirty_cnt(inode);
	return 0;
}

static void ccc_object_size_lock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	ll_inode_size_lock(inode);
	cl_object_attr_lock(obj);
}

static void ccc_object_size_unlock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	cl_object_attr_unlock(obj);
	ll_inode_size_unlock(inode);
}

/*****************************************************************************
 *
 * Page operations.
 *
 */

struct page *ccc_page_vmpage(const struct lu_env *env,
			     const struct cl_page_slice *slice)
{
	return cl2vm_page(slice);
}

int ccc_page_is_under_lock(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *io)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
	struct cl_page *page = slice->cpl_page;
	int result;

	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
	    io->ci_type == CIT_FAULT) {
		if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
			result = -EBUSY;
		else {
			desc->cld_start = page->cp_index;
			desc->cld_end   = page->cp_index;
			desc->cld_obj   = page->cp_obj;
			desc->cld_mode  = CLM_READ;
			result = cl_queue_match(&io->ci_lockset.cls_done,
						desc) ? -EBUSY : 0;
		}
	} else
		result = 0;
	return result;
}

int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}

int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	/* transient page should always be sent. */
	return 0;
}

/*****************************************************************************
 *
 * Lock operations.
 *
 */

void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
}

void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
	struct ccc_lock *clk = cl2ccc_lock(slice);

	kmem_cache_free(ccc_lock_kmem, clk);
}

int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *unused, __u32 enqflags)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

/**
 * Implementation of cl_lock_operations::clo_fits_into() methods for the ccc
 * layer. This function is executed every time io finds an existing lock in
 * the lock cache while creating a new lock. It has to decide whether the
 * cached lock "fits" into the io.
 *
 * \param slice lock to be checked
 * \param io    IO that wants a lock.
 *
 * \see lov_lock_fits_into().
 */
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io)
{
	const struct cl_lock *lock = slice->cls_lock;
	const struct cl_lock_descr *descr = &lock->cll_descr;
	const struct ccc_io *cio = ccc_env_io(env);
	int result;

	/*
	 * Work around DLM peculiarity: it assumes that glimpse
	 * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read
	 * lock when asked for an LCK_PW lock with the LDLM_FL_HAS_INTENT
	 * flag set. Make sure that glimpse doesn't get a CLM_WRITE top-lock,
	 * so that it doesn't enqueue CLM_WRITE sub-locks.
	 */
	if (cio->cui_glimpse)
		result = descr->cld_mode != CLM_WRITE;

	/*
	 * Also, don't match incomplete write locks for read, otherwise read
	 * would enqueue missing sub-locks in the write mode.
	 */
	else if (need->cld_mode != descr->cld_mode)
		result = lock->cll_state >= CLS_ENQUEUED;
	else
		result = 1;
	return result;
}
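
/*
 * Added for illustration: a summary of the decision above, derived from the
 * code itself rather than from any external description. The same logic as
 * a single expression:
 *
 *	fits = cio->cui_glimpse ? descr->cld_mode != CLM_WRITE :
 *	       need->cld_mode != descr->cld_mode ?
 *			lock->cll_state >= CLS_ENQUEUED : 1;
 *
 * That is: a glimpse matches any cached lock except a CLM_WRITE one; a mode
 * mismatch matches only locks that have at least reached CLS_ENQUEUED;
 * equal modes always match.
 */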

/**
 * Implements cl_lock_operations::clo_state() method for the ccc layer,
 * invoked whenever lock state changes. Transfers object attributes that
 * might have been updated as a result of acquiring the lock into the inode.
 */
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state)
{
	struct cl_lock *lock = slice->cls_lock;

	/*
	 * Refresh inode attributes when the lock is moving into CLS_HELD
	 * state, and only when this is a result of a real enqueue, rather
	 * than of finding the lock in the cache.
	 */
	if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
		struct cl_object *obj;
		struct inode *inode;

		obj = slice->cls_obj;
		inode = ccc_object_inode(obj);

		/* vmtruncate() sets the i_size under both a DLM lock and
		 * the ll_inode_size_lock(). If we don't get the
		 * ll_inode_size_lock() here we can match the DLM lock and
		 * reset i_size. generic_file_write() can then trust the
		 * stale i_size when doing appending writes and effectively
		 * cancel the result of the truncate. Getting the
		 * ll_inode_size_lock() after the enqueue maintains the
		 * DLM -> ll_inode_size_lock() acquiring order. */
		if (lock->cll_descr.cld_start == 0 &&
		    lock->cll_descr.cld_end == CL_PAGE_EOF)
			cl_merge_lvb(env, inode);
	}
}

/*****************************************************************************
 *
 * io operations.
 *
 */

int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
	struct cl_object *obj = io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

	memset(&cio->cui_link, 0, sizeof(cio->cui_link));

	if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
		descr->cld_mode = CLM_GROUP;
		descr->cld_gid  = cio->cui_fd->fd_grouplock.cg_gid;
	} else {
		descr->cld_mode = mode;
	}
	descr->cld_obj   = obj;
	descr->cld_start = start;
	descr->cld_end   = end;
	descr->cld_enq_flags = enqflags;

	cl_io_lock_add(env, io, &cio->cui_link);
	return 0;
}

void ccc_io_update_iov(const struct lu_env *env,
		       struct ccc_io *cio, struct cl_io *io)
{
	size_t size = io->u.ci_rw.crw_count;

	if (!cl_is_normalio(env, io) || cio->cui_iter == NULL)
		return;

	iov_iter_truncate(cio->cui_iter, size);
}

int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end)
{
	struct cl_object *obj = io->ci_obj;

	return ccc_io_one_lock_index(env, io, enqflags, mode,
				     cl_index(obj, start), cl_index(obj, end));
}

void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	CLOBINVRNT(env, ios->cis_io->ci_obj,
		   ccc_object_invariant(ios->cis_io->ci_obj));
}

void ccc_io_advance(const struct lu_env *env,
		    const struct cl_io_slice *ios,
		    size_t nob)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = ios->cis_io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	if (!cl_is_normalio(env, io))
		return;

	iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
}

/**
 * Helper function that, if necessary, adjusts the file size (inode->i_size)
 * when the position at offset \a pos is accessed. The file size can be
 * arbitrarily stale on a Lustre client, but the client at least knows KMS.
 * If the accessed area is inside [0, KMS], set the file size to KMS,
 * otherwise glimpse the file size.
 *
 * Locking: cl_isize_lock is used to serialize changes to inode size and to
 * protect consistency between inode size and cl_object
 * attributes. cl_object_size_lock() protects consistency between cl_attr's
 * of top-object and sub-objects.
 */
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	struct inode *inode = ccc_object_inode(obj);
	loff_t pos = start + count - 1;
	loff_t kms;
	int result;

	/*
	 * Consistency guarantees: the following possibilities exist for the
	 * relation between the region being accessed and the real file size
	 * at this moment:
	 *
	 * (A): the region is completely inside of the file;
	 *
	 * (B-x): x bytes of the region are inside of the file, the rest is
	 * outside;
	 *
	 * (C): the region is completely outside of the file.
	 *
	 * This classification is stable under the DLM lock already acquired
	 * by the caller, because to change the class, another client has to
	 * take a DLM lock conflicting with our lock. Also, any updates to
	 * ->i_size by other threads on this client are serialized by
	 * ll_inode_size_lock(). This guarantees that short reads are handled
	 * correctly in the face of concurrent writes and truncates.
	 */
	ccc_object_size_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	if (result == 0) {
		kms = attr->cat_kms;
		if (pos > kms) {
			/*
			 * A glimpse is necessary to determine whether we
			 * return a short read (B) or some zeroes at the end
			 * of the buffer (C).
			 */
			ccc_object_size_unlock(obj);
			result = cl_glimpse_lock(env, io, inode, obj, 0);
			if (result == 0 && exceed != NULL) {
				/* If the objective page index exceeds the
				 * end-of-file page index, return directly.
				 * Do not expect the kernel to check such a
				 * case correctly; linux-2.6.18-128.1.1 fails
				 * to do that.
				 * --bug 17336 */
				loff_t size = cl_isize_read(inode);
				loff_t cur_index = start >> PAGE_CACHE_SHIFT;
				loff_t size_index = (size - 1) >>
						    PAGE_CACHE_SHIFT;

				if ((size == 0 && cur_index != 0) ||
				    size_index < cur_index)
					*exceed = 1;
			}
			return result;
		}
		/*
		 * region is within kms and, hence, within real file
		 * size (A). We need to increase i_size to cover the
		 * read region so that generic_file_read() will do its
		 * job, but that doesn't mean the kms size is
		 * _correct_, it is only the _minimum_ size. If
		 * someone does a stat they will get the correct size
		 * which will always be >= the kms value here.
		 * b=11081
		 */
		if (cl_isize_read(inode) < kms) {
			cl_isize_write_nolock(inode, kms);
			CDEBUG(D_VFSTRACE,
			       DFID" updating i_size %llu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (__u64)cl_isize_read(inode));
		}
	}
	ccc_object_size_unlock(obj);
	return result;
}
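
/*
 * Worked example (illustration only, not from the original file): suppose
 * KMS is 8192 and a client reads 4096 bytes at offset 2048. Then
 * pos = 2048 + 4096 - 1 = 6143 <= kms, case (A): i_size is raised to 8192
 * if it was smaller, and no glimpse RPC is needed. If instead the read
 * started at offset 8192, pos = 12287 > kms, so a glimpse lock is taken to
 * learn the authoritative size and decide between a short read (B) and
 * trailing zeroes (C).
 */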

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret)
{
	struct ccc_req *vrq;

	if (ioret > 0)
		cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);

	vrq = cl2ccc_req(slice);
	kmem_cache_free(ccc_req_kmem, vrq);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the ccc
 * layer. ccc is responsible for
 *
 *    - o_[mac]time
 *
 *    - o_mode
 *
 *    - o_parent_seq
 *
 *    - o_[ug]id
 *
 *    - o_parent_oid
 *
 *    - o_parent_ver
 *
 *    - o_ioepoch
 *
 */
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *attr, u64 flags)
{
	struct inode *inode;
	struct obdo *oa;
	u32 valid_flags;

	oa = attr->cra_oa;
	inode = ccc_object_inode(obj);
	valid_flags = OBD_MD_FLTYPE;

	if (slice->crs_req->crq_type == CRT_WRITE) {
		if (flags & OBD_MD_FLEPOCH) {
			oa->o_valid |= OBD_MD_FLEPOCH;
			oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
			valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
				       OBD_MD_FLUID | OBD_MD_FLGID;
		}
	}
	obdo_from_inode(oa, inode, valid_flags & flags);
	obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
	memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
	       JOBSTATS_JOBID_SIZE);
}
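
/*
 * Usage sketch (illustration only -- the flag combination below is a
 * hypothetical example, not a quote of a real caller): for a CRT_WRITE
 * request issued with
 *
 *	u64 flags = OBD_MD_FLEPOCH | OBD_MD_FLMTIME | OBD_MD_FLCTIME;
 *
 * the function stores the inode's ioepoch in oa->o_ioepoch and lets
 * obdo_from_inode() copy only (valid_flags & flags), i.e. the file type
 * plus mtime/ctime here. For read requests valid_flags stays OBD_MD_FLTYPE,
 * so at most the file type is copied.
 */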

static const struct cl_req_operations ccc_req_ops = {
	.cro_attr_set   = ccc_req_attr_set,
	.cro_completion = ccc_req_completion
};

int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
{
	struct lu_env *env;
	struct cl_io *io;
	int result;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ccc_env_thread_io(env);
	io->ci_obj = cl_i2info(inode)->lli_clob;

	io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
	io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
	io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
	io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
	io->u.ci_setattr.sa_valid = attr->ia_valid;

again:
	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
		struct ccc_io *cio = ccc_env_io(env);

		if (attr->ia_valid & ATTR_FILE)
			/* populate the file descriptor for ftruncate to
			 * honor group lock - see LU-787 */
			cio->cui_fd = cl_iattr2fd(inode, attr);

		result = cl_io_loop(env, io);
	} else {
		result = io->ci_result;
	}
	cl_io_fini(env, io);
	if (unlikely(io->ci_need_restart))
		goto again;
	/* HSM import case: the file is released and cannot be restored; no
	 * need to fail except if restore registration failed with -ENODATA */
	if (result == -ENODATA && io->ci_restore_needed &&
	    io->ci_result != -ENODATA)
		result = 0;
	cl_env_put(env, &refcheck);
	return result;
}

/*****************************************************************************
 *
 * Type conversions.
 *
 */

struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
{
	return &vdv->cdv_cl.cd_lu_dev;
}

struct ccc_device *lu2ccc_dev(const struct lu_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
}

struct ccc_device *cl2ccc_dev(const struct cl_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl);
}

struct lu_object *ccc2lu(struct ccc_object *vob)
{
	return &vob->cob_cl.co_lu;
}

struct ccc_object *lu2ccc(const struct lu_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl.co_lu);
}

struct ccc_object *cl2ccc(const struct cl_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl);
}

struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
{
	return container_of(slice, struct ccc_lock, clk_cl);
}

struct ccc_io *cl2ccc_io(const struct lu_env *env,
			 const struct cl_io_slice *slice)
{
	struct ccc_io *cio;

	cio = container_of(slice, struct ccc_io, cui_cl);
	LASSERT(cio == ccc_env_io(env));
	return cio;
}

struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct ccc_req, crq_cl);
}

struct page *cl2vm_page(const struct cl_page_slice *slice)
{
	return cl2ccc_page(slice)->cpg_page;
}

/*****************************************************************************
 *
 * Accessors.
 *
 */
int ccc_object_invariant(const struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);
	struct cl_inode_info *lli = cl_i2info(inode);

	return (S_ISREG(cl_inode_mode(inode)) ||
		/* i_mode of unlinked inode is zeroed. */
		cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
}

struct inode *ccc_object_inode(const struct cl_object *obj)
{
	return cl2ccc(obj)->cob_inode;
}

/**
 * Initialize or update CLIO structures for regular files when new
 * meta-data arrives from the server.
 *
 * \param inode regular file inode
 * \param md    new file metadata from MDS
 * - allocates cl_object if necessary,
 * - updates the layout, if the object was already here.
 */
int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
	struct lu_env *env;
	struct cl_inode_info *lli;
	struct cl_object *clob;
	struct lu_site *site;
	struct lu_fid *fid;
	struct cl_object_conf conf = {
		.coc_inode = inode,
		.u = {
			.coc_md = md
		}
	};
	int result = 0;
	int refcheck;

	LASSERT(md->body->valid & OBD_MD_FLID);
	LASSERT(S_ISREG(cl_inode_mode(inode)));

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	site = cl_i2sbi(inode)->ll_site;
	lli = cl_i2info(inode);
	fid = &lli->lli_fid;
	LASSERT(fid_is_sane(fid));

	if (lli->lli_clob == NULL) {
		/* clob is a slave of the inode; an empty lli_clob means that
		 * this is a new inode, for which there is no clob in cache
		 * with the given fid, so it is unnecessary to perform
		 * lookup-alloc-lookup-insert, just alloc and insert
		 * directly. */
		LASSERT(inode->i_state & I_NEW);
		conf.coc_lu.loc_flags = LOC_F_NEW;
		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
				      fid, &conf);
		if (!IS_ERR(clob)) {
			/*
			 * No locking is necessary, as the new inode is
			 * locked by the I_NEW bit.
			 */
			lli->lli_clob = clob;
			lli->lli_has_smd = lsm_has_objects(md->lsm);
			lu_object_ref_add(&clob->co_lu, "inode", inode);
		} else
			result = PTR_ERR(clob);
	} else {
		result = cl_conf_set(env, lli->lli_clob, &conf);
	}

	cl_env_put(env, &refcheck);

	if (result != 0)
		CERROR("Failure to initialize cl object "DFID": %d\n",
		       PFID(fid), result);
	return result;
}
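
/*
 * Usage sketch (illustration only; simplified from how llite drives this
 * function, details elided): when an MDS reply carries fresh metadata for a
 * regular file, the inode fill-in path calls
 *
 *	rc = cl_file_inode_init(inode, md);
 *
 * once for a newly allocated inode (I_NEW set, clob allocated with
 * LOC_F_NEW) and again on subsequent metadata updates, where it reduces to
 * cl_conf_set() on the existing clob to refresh the layout.
 */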

/**
 * Wait for others to drop their references to the object first, then drop
 * the last one, which causes the object to be destroyed immediately.
 * Must be called after cl_object_kill() against this object.
 *
 * The reason we want to do this is: destroying the top object will wait for
 * sub-objects being destroyed first, so we can't let the bottom layer (e.g.
 * from ASTs) initiate destroying of the top object, which may deadlock. See
 * bz22520.
 */
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *header = obj->co_lu.lo_header;
	wait_queue_t waiter;

	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
		struct lu_site_bkt_data *bkt;

		bkt = lu_site_bkt_from_fid(site, &header->loh_fid);

		init_waitqueue_entry(&waiter, current);
		add_wait_queue(&bkt->lsb_marche_funebre, &waiter);

		while (1) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&header->loh_ref) == 1)
				break;
			schedule();
		}

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
	}

	cl_object_put(env, obj);
}

void cl_inode_fini(struct inode *inode)
{
	struct lu_env *env;
	struct cl_inode_info *lli = cl_i2info(inode);
	struct cl_object *clob = lli->lli_clob;
	int refcheck;
	int emergency;

	if (clob != NULL) {
		void *cookie;

		cookie = cl_env_reenter();
		env = cl_env_get(&refcheck);
		emergency = IS_ERR(env);
		if (emergency) {
			mutex_lock(&ccc_inode_fini_guard);
			LASSERT(ccc_inode_fini_env != NULL);
			cl_env_implant(ccc_inode_fini_env, &refcheck);
			env = ccc_inode_fini_env;
		}
		/*
		 * cl_object cache is a slave to inode cache (which, in turn
		 * is a slave to dentry cache), don't keep cl_object in memory
		 * when its master is evicted.
		 */
		cl_object_kill(env, clob);
		lu_object_ref_del(&clob->co_lu, "inode", inode);
		cl_object_put_last(env, clob);
		lli->lli_clob = NULL;
		if (emergency) {
			cl_env_unplant(ccc_inode_fini_env, &refcheck);
			mutex_unlock(&ccc_inode_fini_guard);
		} else
			cl_env_put(env, &refcheck);
		cl_env_reexit(cookie);
	}
}

/**
 * Return the IF_* type for a given lu_dirent entry.
 * The IF_* flag should be converted to a particular OS file type in the
 * platform llite module.
 */
__u16 ll_dirent_type_get(struct lu_dirent *ent)
{
	__u16 type = 0;
	struct luda_type *lt;
	int len = 0;

	if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

		len = le16_to_cpu(ent->lde_namelen);
		len = (len + align) & ~align;
		lt = (void *)ent->lde_name + len;
		type = IFTODT(le16_to_cpu(lt->lt_type));
	}
	return type;
}
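
/*
 * Worked example (illustration only): with sizeof(struct luda_type) == 2,
 * align == 1, so a 5-byte name is rounded up as (5 + 1) & ~1 == 6 and the
 * luda_type record is read at ent->lde_name + 6, i.e. at the next 2-byte
 * boundary after the name.
 */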

/**
 * Build an inode number from the passed @fid.
 */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
	if (BITS_PER_LONG == 32 || api32)
		return fid_flatten32(fid);
	else
		return fid_flatten(fid);
}
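
/*
 * Usage sketch (illustration only; the caller shown is hypothetical): a
 * 64-bit client answering a 32-bit interface asks for the compressed form
 * explicitly,
 *
 *	__u64 ino = cl_fid_build_ino(fid, 1);	-- forces fid_flatten32()
 *
 * while native 64-bit paths pass api32 == 0 and, with BITS_PER_LONG == 64,
 * get the full fid_flatten() value.
 */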

/**
 * Build inode generation from the passed @fid. If our FID overflows the
 * 32-bit inode number then return a non-zero generation to distinguish them.
 */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
	__u32 gen;

	if (fid_is_igif(fid)) {
		gen = lu_igif_gen(fid);
		return gen;
	}

	gen = fid_flatten(fid) >> 32;
	return gen;
}
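
/*
 * Worked example (the numbers are hypothetical): if fid_flatten(fid) were
 * 0x0000000500001234, cl_fid_build_gen() would return its high 32 bits,
 * i.e. 5. Per the comment above, FIDs that overflow (and so may collide as)
 * 32-bit inode numbers can then still be told apart by the
 * (ino, generation) pair.
 */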

/* lsm is unreliable after hsm implementation as layout can be changed at
 * any time. This is only to support old, non-clio-ized interfaces. It will
 * cause a deadlock if clio operations are called with this extra layout
 * refcount, because in case the layout changes during the IO,
 * ll_layout_refresh() will have to wait for the refcount to become zero to
 * destroy the older layout.
 *
 * Notice that the lsm returned by this function may not be valid unless
 * called inside layout lock - MDS_INODELOCK_LAYOUT. */
struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
{
	return lov_lsm_get(cl_i2info(inode)->lli_clob);
}

inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
{
	lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
}
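
/*
 * Pairing sketch (illustration only; the surrounding locking discipline is
 * an assumption about the caller, not mandated here): every get must be
 * balanced by a put while the layout is pinned:
 *
 *	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *
 *	if (lsm != NULL) {
 *		... inspect striping; no clio calls while holding it ...
 *	}
 *	ccc_inode_lsm_put(inode, lsm);
 *
 * lov_lsm_put() tolerates a NULL lsm, so the put can be unconditional.
 */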