staging/lustre/debug: quiet noisy console error messages
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/llite/llite_lib.c
37 *
38 * Lustre Light Super operations
39 */
40
41#define DEBUG_SUBSYSTEM S_LLITE
42
43#include <linux/module.h>
44#include <linux/types.h>
45#include <linux/version.h>
46#include <linux/mm.h>
47
48#include <lustre_lite.h>
49#include <lustre_ha.h>
50#include <lustre_dlm.h>
51#include <lprocfs_status.h>
52#include <lustre_disk.h>
53#include <lustre_param.h>
54#include <lustre_log.h>
55#include <cl_object.h>
56#include <obd_cksum.h>
57#include "llite_internal.h"
58
59struct kmem_cache *ll_file_data_slab;
60
61LIST_HEAD(ll_super_blocks);
62DEFINE_SPINLOCK(ll_sb_lock);
63
64#ifndef MS_HAS_NEW_AOPS
65extern struct address_space_operations ll_aops;
66#else
67extern struct address_space_operations_ext ll_aops;
68#endif
69
70#ifndef log2
71#define log2(n) ffz(~(n))
72#endif
73
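/* Allocate and initialize the client superblock info: set up its locks,
 * size the page LRU and readahead limits from available memory, generate
 * the client UUID, link the sbi onto ll_super_blocks, and enable the
 * default flags (verbose, checksum, LRU resize, AGL) and statahead. */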
74static struct ll_sb_info *ll_init_sbi(void)
75{
76 struct ll_sb_info *sbi = NULL;
77 unsigned long pages;
78 unsigned long lru_page_max;
79 struct sysinfo si;
80 class_uuid_t uuid;
81 int i;
82 ENTRY;
83
84 OBD_ALLOC(sbi, sizeof(*sbi));
85 if (!sbi)
86 RETURN(NULL);
87
88 spin_lock_init(&sbi->ll_lock);
89 mutex_init(&sbi->ll_lco.lco_lock);
90 spin_lock_init(&sbi->ll_pp_extent_lock);
91 spin_lock_init(&sbi->ll_process_lock);
92 sbi->ll_rw_stats_on = 0;
93
94 si_meminfo(&si);
95 pages = si.totalram - si.totalhigh;
96 if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) {
97 lru_page_max = pages / 2;
98 } else {
99 lru_page_max = (pages / 4) * 3;
100 }
101
102 /* initialize ll_cache data */
103 atomic_set(&sbi->ll_cache.ccc_users, 0);
104 sbi->ll_cache.ccc_lru_max = lru_page_max;
105 atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
106 spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
107 INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
108
109 atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
110 init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
111
112 sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
113 SBI_DEFAULT_READAHEAD_MAX);
114 sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
115 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
116 SBI_DEFAULT_READAHEAD_WHOLE_MAX;
117 INIT_LIST_HEAD(&sbi->ll_conn_chain);
118 INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
119
120 ll_generate_random_uuid(uuid);
121 class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
122 CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
123
124 spin_lock(&ll_sb_lock);
125 list_add_tail(&sbi->ll_list, &ll_super_blocks);
126 spin_unlock(&ll_sb_lock);
127
128 sbi->ll_flags |= LL_SBI_VERBOSE;
129 sbi->ll_flags |= LL_SBI_CHECKSUM;
130
131 sbi->ll_flags |= LL_SBI_LRU_RESIZE;
132
133 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
134 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
135 pp_r_hist.oh_lock);
136 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
137 pp_w_hist.oh_lock);
138 }
139
140 /* metadata statahead is enabled by default */
141 sbi->ll_sa_max = LL_SA_RPC_DEF;
142 atomic_set(&sbi->ll_sa_total, 0);
143 atomic_set(&sbi->ll_sa_wrong, 0);
144 atomic_set(&sbi->ll_agl_total, 0);
145 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
146
147 RETURN(sbi);
148}
149
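/* Remove the superblock info from ll_super_blocks and free it. */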
150void ll_free_sbi(struct super_block *sb)
151{
152 struct ll_sb_info *sbi = ll_s2sbi(sb);
153 ENTRY;
154
155 if (sbi != NULL) {
156 spin_lock(&ll_sb_lock);
157 list_del(&sbi->ll_list);
158 spin_unlock(&ll_sb_lock);
159 OBD_FREE(sbi, sizeof(*sbi));
160 }
161 EXIT;
162}
163
164static struct dentry_operations ll_d_root_ops = {
165 .d_compare = ll_dcompare,
166 .d_revalidate = ll_revalidate_nd,
167};
168
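/* Connect to the metadata (md) and data (dt) obds named in the config
 * profile, negotiate connect flags, fetch the root FID and its attributes,
 * and finish superblock setup: root inode/dentry, super and dentry
 * operations, close thread, and s_dev for NFS export. */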
169static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
170 struct vfsmount *mnt)
171{
172 struct inode *root = 0;
173 struct ll_sb_info *sbi = ll_s2sbi(sb);
174 struct obd_device *obd;
175 struct obd_capa *oc = NULL;
176 struct obd_statfs *osfs = NULL;
177 struct ptlrpc_request *request = NULL;
178 struct obd_connect_data *data = NULL;
179 struct obd_uuid *uuid;
180 struct md_op_data *op_data;
181 struct lustre_md lmd;
182 obd_valid valid;
183 int size, err, checksum;
184 ENTRY;
185
186 obd = class_name2obd(md);
187 if (!obd) {
188 CERROR("MD %s: not setup or attached\n", md);
189 RETURN(-EINVAL);
190 }
191
192 OBD_ALLOC_PTR(data);
193 if (data == NULL)
194 RETURN(-ENOMEM);
195
196 OBD_ALLOC_PTR(osfs);
197 if (osfs == NULL) {
198 OBD_FREE_PTR(data);
199 RETURN(-ENOMEM);
200 }
201
202 if (proc_lustre_fs_root) {
203 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
204 dt, md);
205 if (err < 0)
206 CERROR("could not register mount in /proc/fs/lustre\n");
207 }
208
209 /* indicate the features supported by this client */
210 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
211 OBD_CONNECT_ATTRFID |
212 OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
213 OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
214 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
215 OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
216 OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
217 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
218 OBD_CONNECT_EINPROGRESS |
219 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
220 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
221
222 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
223 data->ocd_connect_flags |= OBD_CONNECT_SOM;
224
225 if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
226 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
227#ifdef CONFIG_FS_POSIX_ACL
228 data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
229#endif
230
231 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
232 /* flag mdc connection as lightweight, only used for test
233 * purposes, use with care */
234 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
235
236 data->ocd_ibits_known = MDS_INODELOCK_FULL;
237 data->ocd_version = LUSTRE_VERSION_CODE;
238
239 if (sb->s_flags & MS_RDONLY)
240 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
241 if (sbi->ll_flags & LL_SBI_USER_XATTR)
242 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
243
244#ifdef HAVE_MS_FLOCK_LOCK
245 /* force vfs to use lustre handler for flock() calls - bug 10743 */
246 sb->s_flags |= MS_FLOCK_LOCK;
247#endif
248#ifdef MS_HAS_NEW_AOPS
249 sb->s_flags |= MS_HAS_NEW_AOPS;
250#endif
251
252 if (sbi->ll_flags & LL_SBI_FLOCK)
253 sbi->ll_fop = &ll_file_operations_flock;
254 else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
255 sbi->ll_fop = &ll_file_operations;
256 else
257 sbi->ll_fop = &ll_file_operations_noflock;
258
259 /* real client */
260 data->ocd_connect_flags |= OBD_CONNECT_REAL;
261 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
262 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
263
264 data->ocd_brw_size = MD_MAX_BRW_SIZE;
265
266 err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
267 if (err == -EBUSY) {
268 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
269 "recovery, of which this client is not a "
270 "part. Please wait for recovery to complete,"
271 " abort, or time out.\n", md);
272 GOTO(out, err);
273 } else if (err) {
274 CERROR("cannot connect to %s: rc = %d\n", md, err);
275 GOTO(out, err);
276 }
277
278 sbi->ll_md_exp->exp_connect_data = *data;
279
280 err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
281 LUSTRE_SEQ_METADATA);
282 if (err) {
283 CERROR("%s: Can't init metadata layer FID infrastructure, "
284 "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
285 GOTO(out_md, err);
286 }
287
288 /* For mount, we only need fs info from MDT0; in DNE this also
289 * ensures the client can be mounted as long as MDT0 is
290 * available */
291 err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
292 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
293 OBD_STATFS_FOR_MDT0);
294 if (err)
295 GOTO(out_md_fid, err);
296
297 /* This needs to be after statfs to ensure connect has finished.
298 * Note that "data" does NOT contain the valid connect reply.
299 * If connecting to a 1.8 server there will be no LMV device, so
300 * we can access the MDC export directly and exp_connect_flags will
301 * be non-zero, but if accessing an upgraded 2.1 server it will
302 * have the correct flags filled in.
303 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
304 valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
305 if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
306 valid != CLIENT_CONNECT_MDT_REQD) {
307 char *buf;
308
309 OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE);
310 obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
311 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
312 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
313 "feature(s) needed for correct operation "
314 "of this client (%s). Please upgrade "
315 "server or downgrade client.\n",
316 sbi->ll_md_exp->exp_obd->obd_name, buf);
317 OBD_FREE(buf, PAGE_CACHE_SIZE);
318 GOTO(out_md_fid, err = -EPROTO);
319 }
320
321 size = sizeof(*data);
322 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
323 KEY_CONN_DATA, &size, data, NULL);
324 if (err) {
325 CERROR("%s: Get connect data failed: rc = %d\n",
326 sbi->ll_md_exp->exp_obd->obd_name, err);
327 GOTO(out_md_fid, err);
328 }
329
330 LASSERT(osfs->os_bsize);
331 sb->s_blocksize = osfs->os_bsize;
332 sb->s_blocksize_bits = log2(osfs->os_bsize);
333 sb->s_magic = LL_SUPER_MAGIC;
334 sb->s_maxbytes = MAX_LFS_FILESIZE;
335 sbi->ll_namelen = osfs->os_namelen;
336 sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
337
338 if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
339 !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
340 LCONSOLE_INFO("Disabling user_xattr feature because "
341 "it is not supported on the server\n");
342 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
343 }
344
345 if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
346#ifdef MS_POSIXACL
347 sb->s_flags |= MS_POSIXACL;
348#endif
349 sbi->ll_flags |= LL_SBI_ACL;
350 } else {
351 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
352#ifdef MS_POSIXACL
353 sb->s_flags &= ~MS_POSIXACL;
354#endif
355 sbi->ll_flags &= ~LL_SBI_ACL;
356 }
357
358 if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
359 if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
360 sbi->ll_flags |= LL_SBI_RMT_CLIENT;
361 LCONSOLE_INFO("client is set as remote by default.\n");
362 }
363 } else {
364 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
365 sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
366 LCONSOLE_INFO("client claims to be remote, but server "
367 "rejected, forced to be local.\n");
368 }
369 }
370
371 if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
372 LCONSOLE_INFO("client enabled MDS capability!\n");
373 sbi->ll_flags |= LL_SBI_MDS_CAPA;
374 }
375
376 if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
377 LCONSOLE_INFO("client enabled OSS capability!\n");
378 sbi->ll_flags |= LL_SBI_OSS_CAPA;
379 }
380
381 if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
382 sbi->ll_flags |= LL_SBI_64BIT_HASH;
383
384 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
385 sbi->ll_md_brw_size = data->ocd_brw_size;
386 else
387 sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
388
389 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
390 LCONSOLE_INFO("Layout lock feature supported.\n");
391 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
392 }
393
394 obd = class_name2obd(dt);
395 if (!obd) {
396 CERROR("DT %s: not setup or attached\n", dt);
397 GOTO(out_md_fid, err = -ENODEV);
398 }
399
400 data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
401 OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
402 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
403 OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
404 OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
405 OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
406 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
407 OBD_CONNECT_MAXBYTES |
408 OBD_CONNECT_EINPROGRESS |
409 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
410 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
411
412 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
413 data->ocd_connect_flags |= OBD_CONNECT_SOM;
414
415 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
416 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
417 * disabled by default, because it can still be enabled on the
418 * fly via /proc. As a consequence, we still need to come to an
419 * agreement on the supported algorithms at connect time */
420 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
421
422 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
423 data->ocd_cksum_types = OBD_CKSUM_ADLER;
424 else
425 data->ocd_cksum_types = cksum_types_supported_client();
426 }
427
428 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
429 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
430 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
431
432 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
433 "ocd_grant: %d\n", data->ocd_connect_flags,
434 data->ocd_version, data->ocd_grant);
435
436 obd->obd_upcall.onu_owner = &sbi->ll_lco;
437 obd->obd_upcall.onu_upcall = cl_ocd_update;
438
439 data->ocd_brw_size = DT_MAX_BRW_SIZE;
440
441 err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
442 NULL);
443 if (err == -EBUSY) {
444 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
445 "recovery, of which this client is not a "
446 "part. Please wait for recovery to "
447 "complete, abort, or time out.\n", dt);
448 GOTO(out_md, err);
449 } else if (err) {
450 CERROR("%s: Cannot connect to %s: rc = %d\n",
451 sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
452 GOTO(out_md, err);
453 }
454
455 sbi->ll_dt_exp->exp_connect_data = *data;
456
457 err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
458 LUSTRE_SEQ_METADATA);
459 if (err) {
460 CERROR("%s: Can't init data layer FID infrastructure, "
461 "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
462 GOTO(out_dt, err);
463 }
464
465 mutex_lock(&sbi->ll_lco.lco_lock);
466 sbi->ll_lco.lco_flags = data->ocd_connect_flags;
467 sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
468 sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
469 mutex_unlock(&sbi->ll_lco.lco_lock);
470
471 fid_zero(&sbi->ll_root_fid);
472 err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
473 if (err) {
474 CERROR("cannot mds_connect: rc = %d\n", err);
475 GOTO(out_lock_cn_cb, err);
476 }
477 if (!fid_is_sane(&sbi->ll_root_fid)) {
478 CERROR("%s: Invalid root fid "DFID" during mount\n",
479 sbi->ll_md_exp->exp_obd->obd_name,
480 PFID(&sbi->ll_root_fid));
481 GOTO(out_lock_cn_cb, err = -EINVAL);
482 }
483 CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
484
485 sb->s_op = &lustre_super_operations;
486#if THREAD_SIZE >= 8192 /*b=17630*/
487 sb->s_export_op = &lustre_export_operations;
488#endif
489
490 /* make root inode
491 * XXX: move this to after cbd setup? */
492 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
493 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
494 valid |= OBD_MD_FLRMTPERM;
495 else if (sbi->ll_flags & LL_SBI_ACL)
496 valid |= OBD_MD_FLACL;
497
498 OBD_ALLOC_PTR(op_data);
499 if (op_data == NULL)
500 GOTO(out_lock_cn_cb, err = -ENOMEM);
501
502 op_data->op_fid1 = sbi->ll_root_fid;
503 op_data->op_mode = 0;
504 op_data->op_capa1 = oc;
505 op_data->op_valid = valid;
506
507 err = md_getattr(sbi->ll_md_exp, op_data, &request);
508 if (oc)
509 capa_put(oc);
510 OBD_FREE_PTR(op_data);
511 if (err) {
512 CERROR("%s: md_getattr failed for root: rc = %d\n",
513 sbi->ll_md_exp->exp_obd->obd_name, err);
514 GOTO(out_lock_cn_cb, err);
515 }
516
517 err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
518 sbi->ll_md_exp, &lmd);
519 if (err) {
520 CERROR("failed to understand root inode md: rc = %d\n", err);
521 ptlrpc_req_finished(request);
522 GOTO(out_lock_cn_cb, err);
523 }
524
525 LASSERT(fid_is_sane(&sbi->ll_root_fid));
526 root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
527 ll_need_32bit_api(sbi)),
528 &lmd);
529 md_free_lustre_md(sbi->ll_md_exp, &lmd);
530 ptlrpc_req_finished(request);
531
532 if (root == NULL || IS_ERR(root)) {
533 if (lmd.lsm)
534 obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
535#ifdef CONFIG_FS_POSIX_ACL
536 if (lmd.posix_acl) {
537 posix_acl_release(lmd.posix_acl);
538 lmd.posix_acl = NULL;
539 }
540#endif
541 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
542 root = NULL;
543 CERROR("lustre_lite: bad iget4 for root\n");
544 GOTO(out_root, err);
545 }
546
547 err = ll_close_thread_start(&sbi->ll_lcq);
548 if (err) {
549 CERROR("cannot start close thread: rc %d\n", err);
550 GOTO(out_root, err);
551 }
552
553#ifdef CONFIG_FS_POSIX_ACL
554 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
555 rct_init(&sbi->ll_rct);
556 et_init(&sbi->ll_et);
557 }
558#endif
559
560 checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
561 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
562 KEY_CHECKSUM, sizeof(checksum), &checksum,
563 NULL);
564 cl_sb_init(sb);
565
566 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
567 KEY_CACHE_SET, sizeof(sbi->ll_cache),
568 &sbi->ll_cache, NULL);
569
570 sb->s_root = d_make_root(root);
571 if (sb->s_root == NULL) {
572 CERROR("%s: can't make root dentry\n",
573 ll_get_fsname(sb, NULL, 0));
574 GOTO(out_root, err = -ENOMEM);
575 }
576
577 /* kernel >= 2.6.38 stores dentry operations in sb->s_d_op. */
578 d_set_d_op(sb->s_root, &ll_d_root_ops);
579 sb->s_d_op = &ll_d_ops;
580
581 sbi->ll_sdev_orig = sb->s_dev;
582
583 /* We set sb->s_dev equal on all lustre clients in order to support
584 * NFS export clustering. NFSD requires that the FSID be the same
585 * on all clients. */
586 /* s_dev is also used in lt_compare() to compare two fs, but that is
587 * only a node-local comparison. */
588 uuid = obd_get_uuid(sbi->ll_md_exp);
589 if (uuid != NULL)
590 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
591
592 if (data != NULL)
593 OBD_FREE_PTR(data);
594 if (osfs != NULL)
595 OBD_FREE_PTR(osfs);
596
597 RETURN(err);
598out_root:
599 if (root)
600 iput(root);
601out_lock_cn_cb:
602 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
603out_dt:
604 obd_disconnect(sbi->ll_dt_exp);
605 sbi->ll_dt_exp = NULL;
606 /* Make sure all OSCs are gone, since cl_cache is accessing sbi. */
607 obd_zombie_barrier();
608out_md_fid:
609 obd_fid_fini(sbi->ll_md_exp->exp_obd);
610out_md:
611 obd_disconnect(sbi->ll_md_exp);
612 sbi->ll_md_exp = NULL;
613out:
614 if (data != NULL)
615 OBD_FREE_PTR(data);
616 if (osfs != NULL)
617 OBD_FREE_PTR(osfs);
618 lprocfs_unregister_mountpoint(sbi);
619 return err;
620}
621
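/* Ask the MDC for the maximum EA size (KEY_MAX_EASIZE) and return it in
 * *lmmsize. */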
622int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
623{
624 int size, rc;
625
626 *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
627 size = sizeof(int);
628 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
629 KEY_MAX_EASIZE, &size, lmmsize, NULL);
630 if (rc)
631 CERROR("Get max mdsize error rc %d \n", rc);
632
633 RETURN(rc);
634}
635
636void ll_dump_inode(struct inode *inode)
637{
638 struct ll_d_hlist_node *tmp;
639 int dentry_count = 0;
640
641 LASSERT(inode != NULL);
642
643 ll_d_hlist_for_each(tmp, &inode->i_dentry)
644 dentry_count++;
645
646 CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
647 inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
648 inode->i_mode, atomic_read(&inode->i_count), dentry_count);
649}
650
651void lustre_dump_dentry(struct dentry *dentry, int recur)
652{
653 struct list_head *tmp;
654 int subdirs = 0;
655
656 LASSERT(dentry != NULL);
657
658 list_for_each(tmp, &dentry->d_subdirs)
659 subdirs++;
660
661 CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
662 " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
663 dentry->d_name.len, dentry->d_name.name,
664 dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
665 dentry->d_parent, dentry->d_inode, d_refcount(dentry),
666 dentry->d_flags, dentry->d_fsdata, subdirs);
667 if (dentry->d_inode != NULL)
668 ll_dump_inode(dentry->d_inode);
669
670 if (recur == 0)
671 return;
672
673 list_for_each(tmp, &dentry->d_subdirs) {
674 struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
675 lustre_dump_dentry(d, recur - 1);
676 }
677}
678
679void client_common_put_super(struct super_block *sb)
680{
681 struct ll_sb_info *sbi = ll_s2sbi(sb);
682 ENTRY;
683
684#ifdef CONFIG_FS_POSIX_ACL
685 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
686 et_fini(&sbi->ll_et);
687 rct_fini(&sbi->ll_rct);
688 }
689#endif
690
691 ll_close_thread_shutdown(sbi->ll_lcq);
692
693 cl_sb_fini(sb);
694
695 list_del(&sbi->ll_conn_chain);
696
697 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
698 obd_disconnect(sbi->ll_dt_exp);
699 sbi->ll_dt_exp = NULL;
700 /* wait till all OSCs are gone, since cl_cache is accessing sbi.
701 * see LU-2543. */
702 obd_zombie_barrier();
703
704 lprocfs_unregister_mountpoint(sbi);
705
706 obd_fid_fini(sbi->ll_md_exp->exp_obd);
707 obd_disconnect(sbi->ll_md_exp);
708 sbi->ll_md_exp = NULL;
709
710 EXIT;
711}
712
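/* Called when the superblock is being killed: restore the original s_dev
 * (changed for clustered NFS) and mark the sbi as unmounting. */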
713void ll_kill_super(struct super_block *sb)
714{
715 struct ll_sb_info *sbi;
716
717 ENTRY;
718
719 /* sb not initialized? */
720 if (!(sb->s_flags & MS_ACTIVE))
721 return;
722
723 sbi = ll_s2sbi(sb);
724 /* we need to restore s_dev from the value changed for clustered NFS
725 * before put_super, because new kernels cache s_dev and changing
726 * sb->s_dev in put_super does not affect the real device removal */
727 if (sbi) {
728 sb->s_dev = sbi->ll_sdev_orig;
729 sbi->ll_umounting = 1;
730 }
731 EXIT;
732}
733
734char *ll_read_opt(const char *opt, char *data)
735{
736 char *value;
737 char *retval;
738 ENTRY;
739
740 CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
741 if (strncmp(opt, data, strlen(opt)))
742 RETURN(NULL);
743 if ((value = strchr(data, '=')) == NULL)
744 RETURN(NULL);
745
746 value++;
747 OBD_ALLOC(retval, strlen(value) + 1);
748 if (!retval) {
749 CERROR("out of memory!\n");
750 RETURN(NULL);
751 }
752
753 memcpy(retval, value, strlen(value)+1);
754 CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
755 RETURN(retval);
756}
757
758static inline int ll_set_opt(const char *opt, char *data, int fl)
759{
760 if (strncmp(opt, data, strlen(opt)) != 0)
761 return(0);
762 else
763 return(fl);
764}
765
766/* non-client-specific mount options are parsed in lmd_parse */
767static int ll_options(char *options, int *flags)
768{
769 int tmp;
770 char *s1 = options, *s2;
771 ENTRY;
772
773 if (!options)
774 RETURN(0);
775
776 CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
777
778 while (*s1) {
779 CDEBUG(D_SUPER, "next opt=%s\n", s1);
780 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
781 if (tmp) {
782 *flags |= tmp;
783 goto next;
784 }
785 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
786 if (tmp) {
787 *flags |= tmp;
788 goto next;
789 }
790 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
791 if (tmp) {
792 *flags |= tmp;
793 goto next;
794 }
795 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
796 if (tmp) {
797 *flags &= ~tmp;
798 goto next;
799 }
800 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
801 if (tmp) {
802 *flags |= tmp;
803 goto next;
804 }
805 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
806 if (tmp) {
807 *flags &= ~tmp;
808 goto next;
809 }
810#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 5, 50, 0)
811 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
812 if (tmp) {
813 /* Ignore deprecated mount option. The client will
814 * always try to mount with ACL support, whether this
815 * is used depends on whether server supports it. */
816 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
817 "mount option 'acl'.\n");
818 goto next;
819 }
820 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
821 if (tmp) {
822 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
823 "mount option 'noacl'.\n");
824 goto next;
825 }
826#else
827#warning "{no}acl options have been deprecated since 1.8, please remove them"
828#endif
829 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
830 if (tmp) {
831 *flags |= tmp;
832 goto next;
833 }
834 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
835 if (tmp) {
836 *flags |= tmp;
837 goto next;
838 }
839 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
840 if (tmp) {
841 *flags &= ~tmp;
842 goto next;
843 }
844
845 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
846 if (tmp) {
847 *flags |= tmp;
848 goto next;
849 }
850 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
851 if (tmp) {
852 *flags &= ~tmp;
853 goto next;
854 }
855 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
856 if (tmp) {
857 *flags |= tmp;
858 goto next;
859 }
860 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
861 if (tmp) {
862 *flags &= ~tmp;
863 goto next;
864 }
865 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
866 if (tmp) {
867 *flags |= tmp;
868 goto next;
869 }
870 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
871 if (tmp) {
872 *flags &= ~tmp;
873 goto next;
874 }
875 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
876 if (tmp) {
877 *flags |= tmp;
878 goto next;
879 }
880 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
881 if (tmp) {
882 *flags |= tmp;
883 goto next;
884 }
885 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
886 if (tmp) {
887 *flags |= tmp;
888 goto next;
889 }
890 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
891 if (tmp) {
892 *flags &= ~tmp;
893 goto next;
894 }
895 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
896 s1);
897 RETURN(-EINVAL);
898
899next:
900 /* Find next opt */
901 s2 = strchr(s1, ',');
902 if (s2 == NULL)
903 break;
904 s1 = s2 + 1;
905 }
906 RETURN(0);
907}
908
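/* Initialize the Lustre-private part of a freshly allocated inode: locks,
 * open handle counters, statahead/AGL state for directories, and size/
 * truncate state for regular files. lli_fid is left untouched. */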
909void ll_lli_init(struct ll_inode_info *lli)
910{
911 lli->lli_inode_magic = LLI_INODE_MAGIC;
912 lli->lli_flags = 0;
913 lli->lli_ioepoch = 0;
914 lli->lli_maxbytes = MAX_LFS_FILESIZE;
915 spin_lock_init(&lli->lli_lock);
916 lli->lli_posix_acl = NULL;
917 lli->lli_remote_perms = NULL;
918 mutex_init(&lli->lli_rmtperm_mutex);
919 /* Do not set lli_fid, it has been initialized already. */
920 fid_zero(&lli->lli_pfid);
921 INIT_LIST_HEAD(&lli->lli_close_list);
922 INIT_LIST_HEAD(&lli->lli_oss_capas);
923 atomic_set(&lli->lli_open_count, 0);
924 lli->lli_mds_capa = NULL;
925 lli->lli_rmtperm_time = 0;
926 lli->lli_pending_och = NULL;
927 lli->lli_mds_read_och = NULL;
928 lli->lli_mds_write_och = NULL;
929 lli->lli_mds_exec_och = NULL;
930 lli->lli_open_fd_read_count = 0;
931 lli->lli_open_fd_write_count = 0;
932 lli->lli_open_fd_exec_count = 0;
933 mutex_init(&lli->lli_och_mutex);
934 spin_lock_init(&lli->lli_agl_lock);
935 lli->lli_has_smd = false;
936 lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
937 lli->lli_clob = NULL;
938
939 LASSERT(lli->lli_vfs_inode.i_mode != 0);
940 if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
941 mutex_init(&lli->lli_readdir_mutex);
942 lli->lli_opendir_key = NULL;
943 lli->lli_sai = NULL;
944 lli->lli_def_acl = NULL;
945 spin_lock_init(&lli->lli_sa_lock);
946 lli->lli_opendir_pid = 0;
947 } else {
948 sema_init(&lli->lli_size_sem, 1);
949 lli->lli_size_sem_owner = NULL;
950 lli->lli_symlink_name = NULL;
951 init_rwsem(&lli->lli_trunc_sem);
952 mutex_init(&lli->lli_write_mutex);
953 init_rwsem(&lli->lli_glimpse_sem);
954 lli->lli_glimpse_time = 0;
955 INIT_LIST_HEAD(&lli->lli_agl_list);
956 lli->lli_agl_index = 0;
957 lli->lli_async_rc = 0;
958 lli->lli_volatile = false;
959 }
960 mutex_init(&lli->lli_layout_mutex);
961}
962
963static inline int ll_bdi_register(struct backing_dev_info *bdi)
964{
965 static atomic_t ll_bdi_num = ATOMIC_INIT(0);
966
967 bdi->name = "lustre";
968 return bdi_register(bdi, NULL, "lustre-%d",
969 atomic_inc_return(&ll_bdi_num));
970}
971
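/* Client mount entry point: parse llite mount options, set up the backing
 * device info, process the MGS config log for this profile, and hand the
 * resulting mdc/osc obd names to client_common_fill_super(). */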
972int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
973{
974 struct lustre_profile *lprof = NULL;
975 struct lustre_sb_info *lsi = s2lsi(sb);
976 struct ll_sb_info *sbi;
977 char *dt = NULL, *md = NULL;
978 char *profilenm = get_profile_name(sb);
979 struct config_llog_instance *cfg;
980 /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
981 const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
982 int err;
983 ENTRY;
984
985 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
986
987 OBD_ALLOC_PTR(cfg);
988 if (cfg == NULL)
989 RETURN(-ENOMEM);
990
991 try_module_get(THIS_MODULE);
992
993 /* client additional sb info */
994 lsi->lsi_llsbi = sbi = ll_init_sbi();
995 if (!sbi) {
996 module_put(THIS_MODULE);
997 OBD_FREE_PTR(cfg);
998 RETURN(-ENOMEM);
999 }
1000
1001 err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
1002 if (err)
1003 GOTO(out_free, err);
1004
1005 err = bdi_init(&lsi->lsi_bdi);
1006 if (err)
1007 GOTO(out_free, err);
1008 lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1009 lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1010 err = ll_bdi_register(&lsi->lsi_bdi);
1011 if (err)
1012 GOTO(out_free, err);
1013
1014 sb->s_bdi = &lsi->lsi_bdi;
1015
1016 /* Generate a string unique to this super, in case some joker tries
1017 * to mount the same fs at two mount points.
1018 * Use the address of the super itself. */
1019 cfg->cfg_instance = sb;
1020 cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1021 cfg->cfg_callback = class_config_llog_handler;
1022 /* set up client obds */
1023 err = lustre_process_log(sb, profilenm, cfg);
1024 if (err < 0) {
1025 CERROR("Unable to process log: %d\n", err);
1026 GOTO(out_free, err);
1027 }
1028
1029 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1030 lprof = class_get_profile(profilenm);
1031 if (lprof == NULL) {
1032 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1033 " read from the MGS. Does that filesystem "
1034 "exist?\n", profilenm);
1035 GOTO(out_free, err = -EINVAL);
1036 }
1037 CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1038 lprof->lp_md, lprof->lp_dt);
1039
1040 OBD_ALLOC(dt, strlen(lprof->lp_dt) + instlen + 2);
1041 if (!dt)
1042 GOTO(out_free, err = -ENOMEM);
1043 sprintf(dt, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
1044
1045 OBD_ALLOC(md, strlen(lprof->lp_md) + instlen + 2);
1046 if (!md)
1047 GOTO(out_free, err = -ENOMEM);
1048 sprintf(md, "%s-%p", lprof->lp_md, cfg->cfg_instance);
1049
1050 /* connections, registrations, sb setup */
1051 err = client_common_fill_super(sb, md, dt, mnt);
1052
1053out_free:
1054 if (md)
1055 OBD_FREE(md, strlen(lprof->lp_md) + instlen + 2);
1056 if (dt)
1057 OBD_FREE(dt, strlen(lprof->lp_dt) + instlen + 2);
1058 if (err)
1059 ll_put_super(sb);
1060 else if (sbi->ll_flags & LL_SBI_VERBOSE)
1061 LCONSOLE_WARN("Mounted %s\n", profilenm);
1062
1063 OBD_FREE_PTR(cfg);
1064 RETURN(err);
1065} /* ll_fill_super */
1066
1067
1068void lu_context_keys_dump(void);
1069
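/* Unmount: end the config log, wait for unstable pages unless the obd is
 * forced, tear down the common client state, clean up all obds in this
 * superblock's UUID group, and release the sbi and BDI. */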
1070void ll_put_super(struct super_block *sb)
1071{
1072 struct config_llog_instance cfg;
1073 struct obd_device *obd;
1074 struct lustre_sb_info *lsi = s2lsi(sb);
1075 struct ll_sb_info *sbi = ll_s2sbi(sb);
1076 char *profilenm = get_profile_name(sb);
1077 int ccc_count, next, force = 1, rc = 0;
1078 ENTRY;
1079
1080 CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
1081
1082 ll_print_capa_stat(sbi);
1083
1084 cfg.cfg_instance = sb;
1085 lustre_end_log(sb, profilenm, &cfg);
1086
1087 if (sbi->ll_md_exp) {
1088 obd = class_exp2obd(sbi->ll_md_exp);
1089 if (obd)
1090 force = obd->obd_force;
1091 }
1092
1093 /* Wait for unstable pages to be committed to stable storage */
1094 if (force == 0) {
1095 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
1096 rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
1097 atomic_read(&sbi->ll_cache.ccc_unstable_nr) == 0,
1098 &lwi);
1099 }
1100
1101 ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
1102 if (force == 0 && rc != -EINTR)
1103 LASSERTF(ccc_count == 0, "count: %i\n", ccc_count);
1104
1105
1106 /* We need to set force before the lov_disconnect in
1107 * lustre_common_put_super, since l_d cleans up osc's as well. */
1108 if (force) {
1109 next = 0;
1110 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1111 &next)) != NULL) {
1112 obd->obd_force = force;
1113 }
1114 }
1115
1116 if (sbi->ll_lcq) {
1117 /* Only if client_common_fill_super succeeded */
1118 client_common_put_super(sb);
1119 }
1120
1121 next = 0;
1122 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
1123 class_manual_cleanup(obd);
1124 }
1125
1126 if (sbi->ll_flags & LL_SBI_VERBOSE)
1127 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1128
1129 if (profilenm)
1130 class_del_profile(profilenm);
1131
1132 if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1133 bdi_destroy(&lsi->lsi_bdi);
1134 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1135 }
1136
1137 ll_free_sbi(sb);
1138 lsi->lsi_llsbi = NULL;
1139
1140 lustre_common_put_super(sb);
1141
1142 module_put(THIS_MODULE);
1143
1144 EXIT;
1145} /* client_put_super */
1146
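/* Return an igrab()'d inode attached to the lock's resource LVB, or NULL
 * if none is set or its magic is bogus. */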
1147struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1148{
1149 struct inode *inode = NULL;
1150
1151 /* NOTE: we depend on atomic igrab() -bzzz */
1152 lock_res_and_lock(lock);
1153 if (lock->l_resource->lr_lvb_inode) {
1154 struct ll_inode_info * lli;
1155 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1156 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1157 inode = igrab(lock->l_resource->lr_lvb_inode);
1158 } else {
1159 inode = lock->l_resource->lr_lvb_inode;
1160 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1161 D_WARNING, lock, "lr_lvb_inode %p is "
1162 "bogus: magic %08x",
1163 lock->l_resource->lr_lvb_inode,
1164 lli->lli_inode_magic);
1165 inode = NULL;
1166 }
1167 }
1168 unlock_res_and_lock(lock);
1169 return inode;
1170}
1171
1172struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1173{
1174 struct inode *inode = NULL;
1175 /* NOTE: we depend on atomic igrab() -bzzz */
1176 lock_res_and_lock(lock);
1177 if (lock->l_ast_data) {
1178 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1179 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1180 inode = igrab(lock->l_ast_data);
1181 } else {
1182 inode = lock->l_ast_data;
1183 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1184 D_WARNING, lock, "l_ast_data %p is "
1185 "bogus: magic %08x", lock->l_ast_data,
1186 lli->lli_inode_magic);
1187 inode = NULL;
1188 }
1189 }
1190 unlock_res_and_lock(lock);
1191 return inode;
1192}
1193
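/* Drop per-inode Lustre state on eviction: close cached MDS open handles,
 * free the symlink name, ACLs/remote permissions and capas, and finalize
 * the cl_object. */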
1194void ll_clear_inode(struct inode *inode)
1195{
1196 struct ll_inode_info *lli = ll_i2info(inode);
1197 struct ll_sb_info *sbi = ll_i2sbi(inode);
1198 ENTRY;
1199
1200 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1201 inode->i_generation, inode);
1202
1203 if (S_ISDIR(inode->i_mode)) {
1204 /* these should have been cleared in ll_file_release */
1205 LASSERT(lli->lli_opendir_key == NULL);
1206 LASSERT(lli->lli_sai == NULL);
1207 LASSERT(lli->lli_opendir_pid == 0);
1208 }
1209
1210 ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1211 md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1212
1213 LASSERT(!lli->lli_open_fd_write_count);
1214 LASSERT(!lli->lli_open_fd_read_count);
1215 LASSERT(!lli->lli_open_fd_exec_count);
1216
1217 if (lli->lli_mds_write_och)
1218 ll_md_real_close(inode, FMODE_WRITE);
1219 if (lli->lli_mds_exec_och)
1220 ll_md_real_close(inode, FMODE_EXEC);
1221 if (lli->lli_mds_read_och)
1222 ll_md_real_close(inode, FMODE_READ);
1223
1224 if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1225 OBD_FREE(lli->lli_symlink_name,
1226 strlen(lli->lli_symlink_name) + 1);
1227 lli->lli_symlink_name = NULL;
1228 }
1229
1230 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1231 LASSERT(lli->lli_posix_acl == NULL);
1232 if (lli->lli_remote_perms) {
1233 free_rmtperm_hash(lli->lli_remote_perms);
1234 lli->lli_remote_perms = NULL;
1235 }
1236 }
1237#ifdef CONFIG_FS_POSIX_ACL
1238 else if (lli->lli_posix_acl) {
1239 LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1240 LASSERT(lli->lli_remote_perms == NULL);
1241 posix_acl_release(lli->lli_posix_acl);
1242 lli->lli_posix_acl = NULL;
1243 }
1244#endif
1245 lli->lli_inode_magic = LLI_INODE_DEAD;
1246
1247 ll_clear_inode_capas(inode);
1248 if (!S_ISDIR(inode->i_mode))
1249 LASSERT(list_empty(&lli->lli_agl_list));
1250
1251 /*
1252 * XXX This has to be done before lsm is freed below, because
1253 * cl_object still uses inode lsm.
1254 */
1255 cl_inode_fini(inode);
1256 lli->lli_has_smd = false;
1257
1258 EXIT;
1259}
1260
1261int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1262 struct md_open_data **mod)
1263{
1264 struct lustre_md md;
1265 struct inode *inode = dentry->d_inode;
1266 struct ll_sb_info *sbi = ll_i2sbi(inode);
1267 struct ptlrpc_request *request = NULL;
1268 int rc, ia_valid;
1269 ENTRY;
1270
1271 op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1272 LUSTRE_OPC_ANY, NULL);
1273 if (IS_ERR(op_data))
1274 RETURN(PTR_ERR(op_data));
1275
1276 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
1277 &request, mod);
1278 if (rc) {
1279 ptlrpc_req_finished(request);
1280 if (rc == -ENOENT) {
1281 clear_nlink(inode);
1282 /* Unlinked special device node? Or just a race?
1283 * Pretend we have done everything. */
1284 if (!S_ISREG(inode->i_mode) &&
1285 !S_ISDIR(inode->i_mode)) {
1286 ia_valid = op_data->op_attr.ia_valid;
1287 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1288 rc = simple_setattr(dentry, &op_data->op_attr);
1289 op_data->op_attr.ia_valid = ia_valid;
1290 }
1291 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1292 CERROR("md_setattr fails: rc = %d\n", rc);
1293 }
1294 RETURN(rc);
1295 }
1296
1297 rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1298 sbi->ll_md_exp, &md);
1299 if (rc) {
1300 ptlrpc_req_finished(request);
1301 RETURN(rc);
1302 }
1303
1304 ia_valid = op_data->op_attr.ia_valid;
1305 /* inode size will be set in ll_setattr_ost, can't do it now since
1306 * dirty cache is not cleared yet. */
1307 op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1308 rc = simple_setattr(dentry, &op_data->op_attr);
1309 op_data->op_attr.ia_valid = ia_valid;
1310
1311 /* Extract epoch data if obtained. */
1312 op_data->op_handle = md.body->handle;
1313 op_data->op_ioepoch = md.body->ioepoch;
1314
1315 ll_update_inode(inode, &md);
1316 ptlrpc_req_finished(request);
1317
1318 RETURN(rc);
1319}
1320
1321/* Close IO epoch and send Size-on-MDS attribute update. */
1322static int ll_setattr_done_writing(struct inode *inode,
1323 struct md_op_data *op_data,
1324 struct md_open_data *mod)
1325{
1326 struct ll_inode_info *lli = ll_i2info(inode);
1327 int rc = 0;
1328 ENTRY;
1329
1330 LASSERT(op_data != NULL);
1331 if (!S_ISREG(inode->i_mode))
1332 RETURN(0);
1333
1334 CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1335 op_data->op_ioepoch, PFID(&lli->lli_fid));
1336
1337 op_data->op_flags = MF_EPOCH_CLOSE;
1338 ll_done_writing_attr(inode, op_data);
1339 ll_pack_inode2opdata(inode, op_data, NULL);
1340
1341 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1342 if (rc == -EAGAIN) {
1343 /* MDS has instructed us to obtain Size-on-MDS attribute
1344 * from OSTs and send setattr back to MDS. */
1345 rc = ll_som_update(inode, op_data);
1346 } else if (rc) {
1347 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1348 inode->i_ino, rc);
1349 }
1350 RETURN(rc);
1351}
1352
1353static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
1354{
1355 struct obd_capa *capa;
1356 int rc;
1357
1358 if (attr->ia_valid & ATTR_SIZE)
1359 capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
1360 else
1361 capa = ll_mdscapa_get(inode);
1362
1363 rc = cl_setattr_ost(inode, attr, capa);
1364
1365 if (attr->ia_valid & ATTR_SIZE)
1366 ll_truncate_free_capa(capa);
1367 else
1368 capa_put(capa);
1369
1370 return rc;
1371}
1372
1373
1374/* If this inode has objects allocated to it (lsm != NULL), then the OST
1375 * object(s) determine the file size and mtime. Otherwise, the MDS will
1376 * keep these values until such a time that objects are allocated for it.
1377 * We do the MDS operations first, as it is checking permissions for us.
1378 * We don't do the MDS RPC if there is nothing that we want to store there,
1379 * otherwise there is no harm in updating mtime/atime on the MDS if we are
1380 * going to do an RPC anyways.
1381 *
1382 * If we are doing a truncate, we will send the mtime and ctime updates
1383 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1384 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1385 * at the same time.
1386 */
1387int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
1388{
1389 struct inode *inode = dentry->d_inode;
1390 struct ll_inode_info *lli = ll_i2info(inode);
1391 struct md_op_data *op_data = NULL;
1392 struct md_open_data *mod = NULL;
1393 int rc = 0, rc1 = 0;
1394 ENTRY;
1395
1396 CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
1397 "valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
1398 PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
1399 attr->ia_valid);
1400
1401 if (attr->ia_valid & ATTR_SIZE) {
1402 /* Check new size against VFS/VM file size limit and rlimit */
1403 rc = inode_newsize_ok(inode, attr->ia_size);
1404 if (rc)
1405 RETURN(rc);
1406
1407 /* The maximum Lustre file size is variable, based on the
1408 * OST maximum object size and number of stripes. This
1409 * needs another check in addition to the VFS check above. */
1410 if (attr->ia_size > ll_file_maxbytes(inode)) {
1411 CDEBUG(D_INODE,"file "DFID" too large %llu > "LPU64"\n",
1412 PFID(&lli->lli_fid), attr->ia_size,
1413 ll_file_maxbytes(inode));
1414 RETURN(-EFBIG);
1415 }
1416
1417 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1418 }
1419
1420 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1421 if (attr->ia_valid & TIMES_SET_FLAGS) {
1422 if (current_fsuid() != inode->i_uid &&
1423 !cfs_capable(CFS_CAP_FOWNER))
1424 RETURN(-EPERM);
1425 }
1426
1427 /* We mark all of the fields "set" so MDS/OST does not re-set them */
1428 if (attr->ia_valid & ATTR_CTIME) {
1429 attr->ia_ctime = CFS_CURRENT_TIME;
1430 attr->ia_valid |= ATTR_CTIME_SET;
1431 }
1432 if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1433 (attr->ia_valid & ATTR_ATIME)) {
1434 attr->ia_atime = CFS_CURRENT_TIME;
1435 attr->ia_valid |= ATTR_ATIME_SET;
1436 }
1437 if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1438 (attr->ia_valid & ATTR_MTIME)) {
1439 attr->ia_mtime = CFS_CURRENT_TIME;
1440 attr->ia_valid |= ATTR_MTIME_SET;
1441 }
1442
1443 if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1444 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1445 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1446 cfs_time_current_sec());
1447
1448 /* If we are changing file size, file content is modified, flag it. */
1449 if (attr->ia_valid & ATTR_SIZE) {
1450 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1451 spin_lock(&lli->lli_lock);
1452 lli->lli_flags |= LLIF_DATA_MODIFIED;
1453 spin_unlock(&lli->lli_lock);
1454 }
1455
1456 /* We always do an MDS RPC, even if we're only changing the size;
1457 * only the MDS knows whether truncate() should fail with -ETXTBSY */
1458
1459 OBD_ALLOC_PTR(op_data);
1460 if (op_data == NULL)
1461 RETURN(-ENOMEM);
1462
1463 if (!S_ISDIR(inode->i_mode)) {
1464 if (attr->ia_valid & ATTR_SIZE)
1465 inode_dio_write_done(inode);
1466 mutex_unlock(&inode->i_mutex);
1467 down_write(&lli->lli_trunc_sem);
1468 }
1469
1470 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1471
1472 /* Open epoch for truncate. */
1473 if (exp_connect_som(ll_i2mdexp(inode)) &&
1474 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1475 op_data->op_flags = MF_EPOCH_OPEN;
1476
1477 rc = ll_md_setattr(dentry, op_data, &mod);
1478 if (rc)
1479 GOTO(out, rc);
1480
1481 /* RPC to MDT is sent, cancel data modification flag */
1482 if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
1483 spin_lock(&lli->lli_lock);
1484 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1485 spin_unlock(&lli->lli_lock);
1486 }
1487
1488 ll_ioepoch_open(lli, op_data->op_ioepoch);
1489 if (!S_ISREG(inode->i_mode))
1490 GOTO(out, rc = 0);
1491
1492 if (attr->ia_valid & (ATTR_SIZE |
1493 ATTR_ATIME | ATTR_ATIME_SET |
1494 ATTR_MTIME | ATTR_MTIME_SET))
1495 /* For truncate and utimes sending attributes to OSTs, setting
1496 * mtime/atime to the past will be performed under PW [0:EOF]
1497 * extent lock (new_size:EOF for truncate). It may seem
1498 * excessive to send mtime/atime updates to OSTs when not
1499 * setting times to past, but it is necessary due to possible
1500 * time de-synchronization between MDT inode and OST objects */
1501 rc = ll_setattr_ost(inode, attr);
1502 EXIT;
1503out:
1504 if (op_data) {
1505 if (op_data->op_ioepoch) {
1506 rc1 = ll_setattr_done_writing(inode, op_data, mod);
1507 if (!rc)
1508 rc = rc1;
1509 }
1510 ll_finish_md_op_data(op_data);
1511 }
1512 if (!S_ISDIR(inode->i_mode)) {
1513 up_write(&lli->lli_trunc_sem);
1514 mutex_lock(&inode->i_mutex);
1515 if (attr->ia_valid & ATTR_SIZE)
1516 inode_dio_wait(inode);
1517 }
1518
1519 ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
1520 LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
1521
1522 return rc;
1523}
1524
1525int ll_setattr(struct dentry *de, struct iattr *attr)
1526{
1527 int mode = de->d_inode->i_mode;
1528
1529 if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1530 (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1531 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1532
1533 if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
1534 (ATTR_SIZE|ATTR_MODE)) &&
1535 (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1536 (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1537 !(attr->ia_mode & S_ISGID))))
1538 attr->ia_valid |= ATTR_FORCE;
1539
1540 if ((mode & S_ISUID) &&
1541 !(attr->ia_mode & S_ISUID) &&
1542 !(attr->ia_valid & ATTR_KILL_SUID))
1543 attr->ia_valid |= ATTR_KILL_SUID;
1544
1545 if (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1546 !(attr->ia_mode & S_ISGID) &&
1547 !(attr->ia_valid & ATTR_KILL_SGID))
1548 attr->ia_valid |= ATTR_KILL_SGID;
1549
1550 return ll_setattr_raw(de, attr);
1551}
1552
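/* Aggregate statfs results: inode counts come from the MDS, block counts
 * from the OSTs; os_files is reduced if the OSTs have fewer free objects
 * than the MDS has free inodes. */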
1553int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1554 __u64 max_age, __u32 flags)
1555{
1556 struct ll_sb_info *sbi = ll_s2sbi(sb);
1557 struct obd_statfs obd_osfs;
1558 int rc;
1559 ENTRY;
1560
1561 rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1562 if (rc) {
1563 CERROR("md_statfs fails: rc = %d\n", rc);
1564 RETURN(rc);
1565 }
1566
1567 osfs->os_type = sb->s_magic;
1568
1569 CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1570 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1571
1572 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1573 flags |= OBD_STATFS_NODELAY;
1574
1575 rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1576 if (rc) {
1577 CERROR("obd_statfs fails: rc = %d\n", rc);
1578 RETURN(rc);
1579 }
1580
1581 CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1582 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1583 obd_osfs.os_files);
1584
1585 osfs->os_bsize = obd_osfs.os_bsize;
1586 osfs->os_blocks = obd_osfs.os_blocks;
1587 osfs->os_bfree = obd_osfs.os_bfree;
1588 osfs->os_bavail = obd_osfs.os_bavail;
1589
1590 /* If we don't have as many objects free on the OST as inodes
1591 * on the MDS, we reduce the total number of inodes to
1592 * compensate, so that the "inodes in use" number is correct.
1593 */
1594 if (obd_osfs.os_ffree < osfs->os_ffree) {
1595 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1596 obd_osfs.os_ffree;
1597 osfs->os_ffree = obd_osfs.os_ffree;
1598 }
1599
1600 RETURN(rc);
1601}
1602int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1603{
1604 struct super_block *sb = de->d_sb;
1605 struct obd_statfs osfs;
1606 int rc;
1607
1608 CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1609 ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1610
1611 /* Some amount of caching on the client is allowed */
1612 rc = ll_statfs_internal(sb, &osfs,
1613 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1614 0);
1615 if (rc)
1616 return rc;
1617
1618 statfs_unpack(sfs, &osfs);
1619
1620 /* We need to downshift for all 32-bit kernels, because we can't
1621 * tell if the kernel is being called via sys_statfs64() or not.
1622 * Stop before overflowing f_bsize - in which case it is better
1623 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
1624 if (sizeof(long) < 8) {
1625 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1626 sfs->f_bsize <<= 1;
1627
1628 osfs.os_blocks >>= 1;
1629 osfs.os_bfree >>= 1;
1630 osfs.os_bavail >>= 1;
1631 }
1632 }
1633
1634 sfs->f_blocks = osfs.os_blocks;
1635 sfs->f_bfree = osfs.os_bfree;
1636 sfs->f_bavail = osfs.os_bavail;
1637
1638 return 0;
1639}
1640
1641void ll_inode_size_lock(struct inode *inode)
1642{
1643 struct ll_inode_info *lli;
1644
1645 LASSERT(!S_ISDIR(inode->i_mode));
1646
1647 lli = ll_i2info(inode);
1648 LASSERT(lli->lli_size_sem_owner != current);
1649 down(&lli->lli_size_sem);
1650 LASSERT(lli->lli_size_sem_owner == NULL);
1651 lli->lli_size_sem_owner = current;
1652}
1653
1654void ll_inode_size_unlock(struct inode *inode)
1655{
1656 struct ll_inode_info *lli;
1657
1658 lli = ll_i2info(inode);
1659 LASSERT(lli->lli_size_sem_owner == current);
1660 lli->lli_size_sem_owner = NULL;
1661 up(&lli->lli_size_sem);
1662}
1663
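/* Apply the attributes carried in an MDS reply (struct lustre_md) to the
 * VFS inode: layout, timestamps, ownership, flags, nlink, size (only when
 * it can be trusted under SOM) and capabilities. */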
1664void ll_update_inode(struct inode *inode, struct lustre_md *md)
1665{
1666 struct ll_inode_info *lli = ll_i2info(inode);
1667 struct mdt_body *body = md->body;
1668 struct lov_stripe_md *lsm = md->lsm;
1669 struct ll_sb_info *sbi = ll_i2sbi(inode);
1670
1671 LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1672 if (lsm != NULL) {
1673 if (!lli->lli_has_smd &&
1674 !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
1675 cl_file_inode_init(inode, md);
1676
1677 lli->lli_maxbytes = lsm->lsm_maxbytes;
1678 if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
1679 lli->lli_maxbytes = MAX_LFS_FILESIZE;
1680 }
1681
1682 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1683 if (body->valid & OBD_MD_FLRMTPERM)
1684 ll_update_remote_perm(inode, md->remote_perm);
1685 }
1686#ifdef CONFIG_FS_POSIX_ACL
1687 else if (body->valid & OBD_MD_FLACL) {
1688 spin_lock(&lli->lli_lock);
1689 if (lli->lli_posix_acl)
1690 posix_acl_release(lli->lli_posix_acl);
1691 lli->lli_posix_acl = md->posix_acl;
1692 spin_unlock(&lli->lli_lock);
1693 }
1694#endif
1695 inode->i_ino = cl_fid_build_ino(&body->fid1, ll_need_32bit_api(sbi));
1696 inode->i_generation = cl_fid_build_gen(&body->fid1);
1697
1698 if (body->valid & OBD_MD_FLATIME) {
1699 if (body->atime > LTIME_S(inode->i_atime))
1700 LTIME_S(inode->i_atime) = body->atime;
1701 lli->lli_lvb.lvb_atime = body->atime;
1702 }
1703 if (body->valid & OBD_MD_FLMTIME) {
1704 if (body->mtime > LTIME_S(inode->i_mtime)) {
1705 CDEBUG(D_INODE, "setting ino %lu mtime from %lu "
1706 "to "LPU64"\n", inode->i_ino,
1707 LTIME_S(inode->i_mtime), body->mtime);
1708 LTIME_S(inode->i_mtime) = body->mtime;
1709 }
1710 lli->lli_lvb.lvb_mtime = body->mtime;
1711 }
1712 if (body->valid & OBD_MD_FLCTIME) {
1713 if (body->ctime > LTIME_S(inode->i_ctime))
1714 LTIME_S(inode->i_ctime) = body->ctime;
1715 lli->lli_lvb.lvb_ctime = body->ctime;
1716 }
1717 if (body->valid & OBD_MD_FLMODE)
1718 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1719 if (body->valid & OBD_MD_FLTYPE)
1720 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1721 LASSERT(inode->i_mode != 0);
1722 if (S_ISREG(inode->i_mode)) {
1723 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1724 } else {
1725 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1726 }
1727 if (body->valid & OBD_MD_FLUID)
1728 inode->i_uid = body->uid;
1729 if (body->valid & OBD_MD_FLGID)
1730 inode->i_gid = body->gid;
1731 if (body->valid & OBD_MD_FLFLAGS)
1732 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1733 if (body->valid & OBD_MD_FLNLINK)
1734 set_nlink(inode, body->nlink);
1735 if (body->valid & OBD_MD_FLRDEV)
1736 inode->i_rdev = old_decode_dev(body->rdev);
1737
1738 if (body->valid & OBD_MD_FLID) {
1739 /* FID shouldn't be changed! */
1740 if (fid_is_sane(&lli->lli_fid)) {
1741 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1742 "Trying to change FID "DFID
1743 " to the "DFID", inode %lu/%u(%p)\n",
1744 PFID(&lli->lli_fid), PFID(&body->fid1),
1745 inode->i_ino, inode->i_generation, inode);
1746 } else
1747 lli->lli_fid = body->fid1;
1748 }
1749
1750 LASSERT(fid_seq(&lli->lli_fid) != 0);
1751
1752 if (body->valid & OBD_MD_FLSIZE) {
1753 if (exp_connect_som(ll_i2mdexp(inode)) &&
1754 S_ISREG(inode->i_mode)) {
1755 struct lustre_handle lockh;
1756 ldlm_mode_t mode;
1757
1758 /* As it is possible a blocking ast has been processed
1759 * by this time, we need to check there is an UPDATE
1760 * lock on the client and set LLIF_MDS_SIZE_LOCK while
1761 * holding it. */
1762 mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1763 &lockh, LDLM_FL_CBPENDING);
1764 if (mode) {
1765 if (lli->lli_flags & (LLIF_DONE_WRITING |
1766 LLIF_EPOCH_PENDING |
1767 LLIF_SOM_DIRTY)) {
1768 CERROR("ino %lu flags %u still has "
1769 "size authority! do not trust "
1770 "the size got from MDS\n",
1771 inode->i_ino, lli->lli_flags);
1772 } else {
1773 /* Use old size assignment to avoid
1774 * deadlock bz14138 & bz14326 */
1775 i_size_write(inode, body->size);
1776 lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1777 }
1778 ldlm_lock_decref(&lockh, mode);
1779 }
1780 } else {
1781 /* Use old size assignment to avoid
1782 * deadlock bz14138 & bz14326 */
1783 i_size_write(inode, body->size);
1784
1785 CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
1786 inode->i_ino, (unsigned long long)body->size);
1787 }
1788
1789 if (body->valid & OBD_MD_FLBLOCKS)
1790 inode->i_blocks = body->blocks;
1791 }
1792
1793 if (body->valid & OBD_MD_FLMDSCAPA) {
1794 LASSERT(md->mds_capa);
1795 ll_add_capa(inode, md->mds_capa);
1796 }
1797 if (body->valid & OBD_MD_FLOSSCAPA) {
1798 LASSERT(md->oss_capa);
1799 ll_add_capa(inode, md->oss_capa);
1800 }
1801}
1802
1803void ll_read_inode2(struct inode *inode, void *opaque)
1804{
1805 struct lustre_md *md = opaque;
1806 struct ll_inode_info *lli = ll_i2info(inode);
1807 ENTRY;
1808
1809 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1810 PFID(&lli->lli_fid), inode);
1811
1812 LASSERT(!lli->lli_has_smd);
1813
1814 /* Core attributes from the MDS first. This is a new inode, and
1815 * the VFS doesn't zero times in the core inode so we have to do
1816 * it ourselves. They will be overwritten by either MDS or OST
1817 * attributes - we just need to make sure they aren't newer. */
1818 LTIME_S(inode->i_mtime) = 0;
1819 LTIME_S(inode->i_atime) = 0;
1820 LTIME_S(inode->i_ctime) = 0;
1821 inode->i_rdev = 0;
1822 ll_update_inode(inode, md);
1823
1824 /* OIDEBUG(inode); */
1825
1826 /* initializing backing dev info. */
1827 inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
1828
1829
1830 if (S_ISREG(inode->i_mode)) {
1831 struct ll_sb_info *sbi = ll_i2sbi(inode);
1832 inode->i_op = &ll_file_inode_operations;
1833 inode->i_fop = sbi->ll_fop;
1834 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
1835 EXIT;
1836 } else if (S_ISDIR(inode->i_mode)) {
1837 inode->i_op = &ll_dir_inode_operations;
1838 inode->i_fop = &ll_dir_operations;
1839 EXIT;
1840 } else if (S_ISLNK(inode->i_mode)) {
1841 inode->i_op = &ll_fast_symlink_inode_operations;
1842 EXIT;
1843 } else {
1844 inode->i_op = &ll_special_inode_operations;
1845
1846 init_special_inode(inode, inode->i_mode,
1847 inode->i_rdev);
1848
1849 EXIT;
1850 }
1851}
1852
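/* Final inode teardown: flush or discard dirty pages, truncate the page
 * cache, then clear the Lustre state and the VFS inode. */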
1853void ll_delete_inode(struct inode *inode)
1854{
1855 struct cl_inode_info *lli = cl_i2info(inode);
1856 ENTRY;
1857
1858 if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
1859 /* discard all dirty pages before truncating them, required by
1860 * osc_extent implementation at LU-1030. */
1861 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
1862 CL_FSYNC_DISCARD, 1);
1863
1864 truncate_inode_pages(&inode->i_data, 0);
1865
1866 /* Workaround for LU-118 */
1867 if (inode->i_data.nrpages) {
1868 TREE_READ_LOCK_IRQ(&inode->i_data);
1869 TREE_READ_UNLOCK_IRQ(&inode->i_data);
1870 LASSERTF(inode->i_data.nrpages == 0,
1871 "inode=%lu/%u(%p) nrpages=%lu, see "
1872 "http://jira.whamcloud.com/browse/LU-118\n",
1873 inode->i_ino, inode->i_generation, inode,
1874 inode->i_data.nrpages);
1875 }
1876 /* Workaround end */
1877
1878 ll_clear_inode(inode);
1879 clear_inode(inode);
1880
1881 EXIT;
1882}
1883
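/*
 * ioctl helper for the FSFILT_IOC_GETFLAGS/SETFLAGS pair: GETFLAGS
 * fetches the flags from the MDS with md_getattr() and copies them out
 * to user space; SETFLAGS pushes the new flags to the MDS via
 * md_setattr() and, if the file has a striping (lsm), mirrors them to
 * the OSTs with obd_setattr_rqset().
 */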
1884int ll_iocontrol(struct inode *inode, struct file *file,
1885 unsigned int cmd, unsigned long arg)
1886{
1887 struct ll_sb_info *sbi = ll_i2sbi(inode);
1888 struct ptlrpc_request *req = NULL;
1889 int rc, flags = 0;
1890 ENTRY;
1891
1892	switch (cmd) {
1893 case FSFILT_IOC_GETFLAGS: {
1894 struct mdt_body *body;
1895 struct md_op_data *op_data;
1896
1897 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1898 0, 0, LUSTRE_OPC_ANY,
1899 NULL);
1900 if (IS_ERR(op_data))
1901 RETURN(PTR_ERR(op_data));
1902
1903 op_data->op_valid = OBD_MD_FLFLAGS;
1904 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1905 ll_finish_md_op_data(op_data);
1906 if (rc) {
1907 CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1908 RETURN(-abs(rc));
1909 }
1910
1911 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1912
1913 flags = body->flags;
1914
1915 ptlrpc_req_finished(req);
1916
1917 RETURN(put_user(flags, (int *)arg));
1918 }
1919 case FSFILT_IOC_SETFLAGS: {
1920 struct lov_stripe_md *lsm;
1921 struct obd_info oinfo = { { { 0 } } };
1922 struct md_op_data *op_data;
1923
1924 if (get_user(flags, (int *)arg))
1925 RETURN(-EFAULT);
1926
1927 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1928 LUSTRE_OPC_ANY, NULL);
1929 if (IS_ERR(op_data))
1930 RETURN(PTR_ERR(op_data));
1931
1932 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1933 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1934 rc = md_setattr(sbi->ll_md_exp, op_data,
1935 NULL, 0, NULL, 0, &req, NULL);
1936 ll_finish_md_op_data(op_data);
1937 ptlrpc_req_finished(req);
1938 if (rc)
1939 RETURN(rc);
1940
1941 inode->i_flags = ll_ext_to_inode_flags(flags);
1942
1943 lsm = ccc_inode_lsm_get(inode);
1944 if (lsm == NULL)
1945 RETURN(0);
1946
1947 OBDO_ALLOC(oinfo.oi_oa);
1948 if (!oinfo.oi_oa) {
1949 ccc_inode_lsm_put(inode, lsm);
1950 RETURN(-ENOMEM);
1951 }
1952 oinfo.oi_md = lsm;
1953 oinfo.oi_oa->o_oi = lsm->lsm_oi;
1954 oinfo.oi_oa->o_flags = flags;
1955 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
1956 OBD_MD_FLGROUP;
1957 oinfo.oi_capa = ll_mdscapa_get(inode);
1958 obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
1959 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1960 capa_put(oinfo.oi_capa);
1961 OBDO_FREE(oinfo.oi_oa);
1962 ccc_inode_lsm_put(inode, lsm);
1963
1964 if (rc && rc != -EPERM && rc != -EACCES)
1965 CERROR("osc_setattr_async fails: rc = %d\n", rc);
1966
1967 RETURN(rc);
1968 }
1969 default:
1970 RETURN(-ENOSYS);
1971 }
1972
1973 RETURN(0);
1974}
1975
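/*
 * Ask both the MD and DT exports to flush the security context for the
 * current user by sending KEY_FLUSH_CTX; the return values of
 * obd_set_info_async() are not checked.
 */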
1976int ll_flush_ctx(struct inode *inode)
1977{
1978 struct ll_sb_info *sbi = ll_i2sbi(inode);
1979
1980 CDEBUG(D_SEC, "flush context for user %d\n", current_uid());
1981
1982 obd_set_info_async(NULL, sbi->ll_md_exp,
1983 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1984 0, NULL, NULL);
1985 obd_set_info_async(NULL, sbi->ll_dt_exp,
1986 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1987 0, NULL, NULL);
1988 return 0;
1989}
1990
1991/* umount -f client means force down, don't save state */
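/* Both the MDC and LOV obds are marked obd_force so no state is saved,
 * and IOC_OSC_SET_ACTIVE is sent to both exports with a zeroed
 * obd_ioctl_data (presumably deactivating the imports) before a final
 * schedule() gives outstanding requests a chance to drain. */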
1992void ll_umount_begin(struct super_block *sb)
1993{
1994 struct ll_sb_info *sbi = ll_s2sbi(sb);
1995 struct obd_device *obd;
1996 struct obd_ioctl_data *ioc_data;
1997 ENTRY;
1998
1999
2000 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
2001 sb->s_count, atomic_read(&sb->s_active));
2002
2003 obd = class_exp2obd(sbi->ll_md_exp);
2004 if (obd == NULL) {
2005 CERROR("Invalid MDC connection handle "LPX64"\n",
2006 sbi->ll_md_exp->exp_handle.h_cookie);
2007 EXIT;
2008 return;
2009 }
2010 obd->obd_force = 1;
2011
2012 obd = class_exp2obd(sbi->ll_dt_exp);
2013 if (obd == NULL) {
2014 CERROR("Invalid LOV connection handle "LPX64"\n",
2015 sbi->ll_dt_exp->exp_handle.h_cookie);
2016 EXIT;
2017 return;
2018 }
2019 obd->obd_force = 1;
2020
2021 OBD_ALLOC_PTR(ioc_data);
2022 if (ioc_data) {
2023 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2024 sizeof *ioc_data, ioc_data, NULL);
2025
2026 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2027 sizeof *ioc_data, ioc_data, NULL);
2028
2029 OBD_FREE_PTR(ioc_data);
2030 }
2031
2032 /* Really, we'd like to wait until there are no requests outstanding,
2033 * and then continue. For now, we just invalidate the requests,
2034 * schedule() and sleep one second if needed, and hope.
2035 */
2036 schedule();
2037
2038 EXIT;
2039}
2040
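/*
 * Handle ro/rw remounts: when MS_RDONLY changes, the new state is pushed
 * to the MDC via KEY_READ_ONLY before the superblock flag is updated.
 * A minimal usage sketch (the mount point is illustrative only):
 *
 *	mount -o remount,ro /mnt/lustre   ->  read_only != 0, MS_RDONLY set
 *	mount -o remount,rw /mnt/lustre   ->  read_only == 0, MS_RDONLY cleared
 */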
2041int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2042{
2043 struct ll_sb_info *sbi = ll_s2sbi(sb);
2044 char *profilenm = get_profile_name(sb);
2045 int err;
2046 __u32 read_only;
2047
2048 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2049 read_only = *flags & MS_RDONLY;
2050 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2051 sizeof(KEY_READ_ONLY),
2052 KEY_READ_ONLY, sizeof(read_only),
2053 &read_only, NULL);
2054 if (err) {
2055 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2056 profilenm, read_only ?
2057 "read-only" : "read-write", err);
2058 return err;
2059 }
2060
2061 if (read_only)
2062 sb->s_flags |= MS_RDONLY;
2063 else
2064 sb->s_flags &= ~MS_RDONLY;
2065
2066 if (sbi->ll_flags & LL_SBI_VERBOSE)
2067 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2068 read_only ? "read-only" : "read-write");
2069 }
2070 return 0;
2071}
2072
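/*
 * Unpack the lustre_md from a ptlrpc reply and turn it into an inode:
 * update *inode if one was passed in, otherwise ll_iget() a new one from
 * the FID in the reply body.  If the intent carries a granted layout
 * lock, the lsm is pushed into the cl_object via ll_layout_conf() so a
 * stale layout cannot overwrite a newer one.
 */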
2073int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2074 struct super_block *sb, struct lookup_intent *it)
2075{
2076 struct ll_sb_info *sbi = NULL;
2077 struct lustre_md md;
2078 int rc;
2079 ENTRY;
2080
2081 LASSERT(*inode || sb);
2082 sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2083 rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2084 sbi->ll_md_exp, &md);
2085 if (rc)
2086 RETURN(rc);
2087
2088 if (*inode) {
2089 ll_update_inode(*inode, &md);
2090 } else {
2091 LASSERT(sb != NULL);
2092
2093		/*
2094		 * At this point the server returns the same FID that the client
2095		 * generated for the create, so using ->fid1 is okay here.
2096		 */
2097 LASSERT(fid_is_sane(&md.body->fid1));
2098
2099 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
2100 ll_need_32bit_api(sbi)),
2101 &md);
2102 if (*inode == NULL || IS_ERR(*inode)) {
2103#ifdef CONFIG_FS_POSIX_ACL
2104 if (md.posix_acl) {
2105 posix_acl_release(md.posix_acl);
2106 md.posix_acl = NULL;
2107 }
2108#endif
2109 rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2110 *inode = NULL;
2111			CERROR("new_inode - fatal: rc %d\n", rc);
2112 GOTO(out, rc);
2113 }
2114 }
2115
2116	/* Handle a piggybacked layout lock.
2117	 * A layout lock can be piggybacked on getattr and open requests.
2118	 * The lsm may be applied to the inode only if it comes with a layout
2119	 * lock, otherwise the correct layout may be overwritten, for example:
2120	 * 1. proc1: the MDT returns an lsm but does not grant the layout lock
2121	 * 2. the layout is changed by another client
2122	 * 3. proc2: refreshes the layout and is granted the layout lock
2123	 * 4. proc1: applies the now-stale layout */
2124 if (it != NULL && it->d.lustre.it_lock_mode != 0) {
2125 struct lustre_handle lockh;
2126 struct ldlm_lock *lock;
2127
2128 lockh.cookie = it->d.lustre.it_lock_handle;
2129 lock = ldlm_handle2lock(&lockh);
2130 LASSERT(lock != NULL);
2131 if (ldlm_has_layout(lock)) {
2132 struct cl_object_conf conf;
2133
2134 memset(&conf, 0, sizeof(conf));
2135 conf.coc_opc = OBJECT_CONF_SET;
2136 conf.coc_inode = *inode;
2137 conf.coc_lock = lock;
2138 conf.u.coc_md = &md;
2139 (void)ll_layout_conf(*inode, &conf);
2140 }
2141 LDLM_LOCK_PUT(lock);
2142 }
2143
2144out:
2145 if (md.lsm != NULL)
2146 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2147 md_free_lustre_md(sbi->ll_md_exp, &md);
2148 RETURN(rc);
2149}
2150
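/*
 * IOC_OBD_STATFS pass-through: validate the obd_ioctl_data buffers from
 * user space, pick the LMV or LOV export based on the LL_STATFS_* type
 * flag, and forward the request with obd_iocontrol().
 */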
2151int ll_obd_statfs(struct inode *inode, void *arg)
2152{
2153 struct ll_sb_info *sbi = NULL;
2154 struct obd_export *exp;
2155 char *buf = NULL;
2156 struct obd_ioctl_data *data = NULL;
2157 __u32 type;
2158 __u32 flags;
2159 int len = 0, rc;
2160
2161 if (!inode || !(sbi = ll_i2sbi(inode)))
2162 GOTO(out_statfs, rc = -EINVAL);
2163
2164 rc = obd_ioctl_getdata(&buf, &len, arg);
2165 if (rc)
2166 GOTO(out_statfs, rc);
2167
2168 data = (void*)buf;
2169 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2170 !data->ioc_pbuf1 || !data->ioc_pbuf2)
2171 GOTO(out_statfs, rc = -EINVAL);
2172
2173 if (data->ioc_inllen1 != sizeof(__u32) ||
2174 data->ioc_inllen2 != sizeof(__u32) ||
2175 data->ioc_plen1 != sizeof(struct obd_statfs) ||
2176 data->ioc_plen2 != sizeof(struct obd_uuid))
2177 GOTO(out_statfs, rc = -EINVAL);
2178
2179 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2180 if (type & LL_STATFS_LMV)
2181 exp = sbi->ll_md_exp;
2182 else if (type & LL_STATFS_LOV)
2183 exp = sbi->ll_dt_exp;
2184 else
2185 GOTO(out_statfs, rc = -ENODEV);
2186
2187 flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
2188 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
2189 if (rc)
2190 GOTO(out_statfs, rc);
2191out_statfs:
2192 if (buf)
2193 obd_ioctl_freedata(buf, len);
2194 return rc;
2195}
2196
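/*
 * Apply a config-log parameter to a client superblock.  The superblock
 * pointer is recovered from the instance name; a hypothetical example of
 * the parsing done below (the address is illustrative only):
 *
 *	"lustre-client-ffff8800aacfe000"  ->  ptr = "ffff8800aacfe000"
 *	sscanf(ptr, "%lx", &x)            ->  sb  = (void *)0xffff8800aacfe000
 *
 * The parameter itself is then handed to class_process_proc_param().
 */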
2197int ll_process_config(struct lustre_cfg *lcfg)
2198{
2199 char *ptr;
2200 void *sb;
2201 struct lprocfs_static_vars lvars;
2202 unsigned long x;
2203 int rc = 0;
2204
2205 lprocfs_llite_init_vars(&lvars);
2206
2207 /* The instance name contains the sb: lustre-client-aacfe000 */
2208 ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2209 if (!ptr || !*(++ptr))
2210 return -EINVAL;
2211 if (sscanf(ptr, "%lx", &x) != 1)
2212 return -EINVAL;
2213 sb = (void *)x;
2214 /* This better be a real Lustre superblock! */
2215 LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2216
2217	/* Note we have not called client_common_fill_super yet, so
2218	 * proc fns must be able to handle that! */
2219 rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2220 lcfg, sb);
2221 if (rc > 0)
2222 rc = 0;
2223	return rc;
2224}
2225
2226/* This function prepares an md_op_data hint for passing down to the MD stack. */
2227struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
2228 struct inode *i1, struct inode *i2,
2229 const char *name, int namelen,
2230 int mode, __u32 opc, void *data)
2231{
2232 LASSERT(i1 != NULL);
2233
2234 if (namelen > ll_i2sbi(i1)->ll_namelen)
2235 return ERR_PTR(-ENAMETOOLONG);
2236
2237 if (op_data == NULL)
2238 OBD_ALLOC_PTR(op_data);
2239
2240 if (op_data == NULL)
2241 return ERR_PTR(-ENOMEM);
2242
2243 ll_i2gids(op_data->op_suppgids, i1, i2);
2244 op_data->op_fid1 = *ll_inode2fid(i1);
2245 op_data->op_capa1 = ll_mdscapa_get(i1);
2246
2247 if (i2) {
2248 op_data->op_fid2 = *ll_inode2fid(i2);
2249 op_data->op_capa2 = ll_mdscapa_get(i2);
2250 } else {
2251 fid_zero(&op_data->op_fid2);
2252 op_data->op_capa2 = NULL;
2253 }
2254
2255 op_data->op_name = name;
2256 op_data->op_namelen = namelen;
2257 op_data->op_mode = mode;
2258 op_data->op_mod_time = cfs_time_current_sec();
2259 op_data->op_fsuid = current_fsuid();
2260 op_data->op_fsgid = current_fsgid();
2261 op_data->op_cap = cfs_curproc_cap_pack();
2262 op_data->op_bias = 0;
2263 op_data->op_cli_flags = 0;
2264 if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
2265 filename_is_volatile(name, namelen, NULL))
2266 op_data->op_bias |= MDS_CREATE_VOLATILE;
2267 op_data->op_opc = opc;
2268 op_data->op_mds = 0;
2269 op_data->op_data = data;
2270
2271	/* If the file is being opened after mknod() (normally due to NFS),
2272	 * try to use the default stripe data from the parent directory when
2273	 * allocating OST objects, and pass the parent FID to the MDS. */
2274 if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
2275 !ll_i2info(i2)->lli_has_smd) {
2276 struct ll_inode_info *lli = ll_i2info(i2);
2277
2278 spin_lock(&lli->lli_lock);
2279 if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
2280 op_data->op_fid1 = lli->lli_pfid;
2281 spin_unlock(&lli->lli_lock);
2282		/** We ignore the parent's capability temporarily. */
2283 }
2284
2285 /* When called by ll_setattr_raw, file is i1. */
2286 if (LLIF_DATA_MODIFIED & ll_i2info(i1)->lli_flags)
2287 op_data->op_bias |= MDS_DATA_MODIFIED;
2288
2289 return op_data;
2290}
2291
2292void ll_finish_md_op_data(struct md_op_data *op_data)
2293{
2294 capa_put(op_data->op_capa1);
2295 capa_put(op_data->op_capa2);
2296 OBD_FREE_PTR(op_data);
2297}
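/*
 * Typical call pattern for the pair above, as used by ll_iocontrol()
 * earlier in this file (sketch only, error handling trimmed):
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		RETURN(PTR_ERR(op_data));
 *	op_data->op_valid = OBD_MD_FLFLAGS;
 *	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 *	ll_finish_md_op_data(op_data);
 */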
2298
2299int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2300{
2301 struct ll_sb_info *sbi;
2302
2303 LASSERT((seq != NULL) && (dentry != NULL));
2304 sbi = ll_s2sbi(dentry->d_sb);
2305
2306 if (sbi->ll_flags & LL_SBI_NOLCK)
2307 seq_puts(seq, ",nolock");
2308
2309 if (sbi->ll_flags & LL_SBI_FLOCK)
2310 seq_puts(seq, ",flock");
2311
2312 if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2313 seq_puts(seq, ",localflock");
2314
2315 if (sbi->ll_flags & LL_SBI_USER_XATTR)
2316 seq_puts(seq, ",user_xattr");
2317
2318 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2319 seq_puts(seq, ",lazystatfs");
2320
2321 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2322 seq_puts(seq, ",user_fid2path");
2323
2324 RETURN(0);
2325}
2326
2327/**
2328 * Get the obd name selected by \a cmd and copy it out to user space.
2329 */
2330int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2331{
2332 struct ll_sb_info *sbi = ll_i2sbi(inode);
2333 struct obd_device *obd;
2334 ENTRY;
2335
2336 if (cmd == OBD_IOC_GETDTNAME)
2337 obd = class_exp2obd(sbi->ll_dt_exp);
2338 else if (cmd == OBD_IOC_GETMDNAME)
2339 obd = class_exp2obd(sbi->ll_md_exp);
2340 else
2341 RETURN(-EINVAL);
2342
2343 if (!obd)
2344 RETURN(-ENOENT);
2345
2346 if (copy_to_user((void *)arg, obd->obd_name,
2347 strlen(obd->obd_name) + 1))
2348 RETURN(-EFAULT);
2349
2350 RETURN(0);
2351}
2352
2353/**
2354 * Get the Lustre file system name for \a sb. If \a buf is provided
2355 * (non-NULL), the fsname will be returned in this buffer; otherwise, a
2356 * static buffer will be used to store the fsname and returned to the caller.
2357 */
2358char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2359{
2360 static char fsname_static[MTI_NAME_MAXLEN];
2361 struct lustre_sb_info *lsi = s2lsi(sb);
2362 char *ptr;
2363 int len;
2364
2365 if (buf == NULL) {
2366		/* This means the caller wants to use the static buffer
2367		 * and does not care about races. Usually this is on an
2368		 * error reporting path. */
2369 buf = fsname_static;
2370 buflen = sizeof(fsname_static);
2371 }
2372
2373 len = strlen(lsi->lsi_lmd->lmd_profile);
2374 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2375 if (ptr && (strcmp(ptr, "-client") == 0))
2376 len -= 7;
2377
2378 if (unlikely(len >= buflen))
2379 len = buflen - 1;
2380 strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2381 buf[len] = '\0';
2382
2383 return buf;
2384}
2385
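/*
 * Resolve a dentry to a path string in @buf by pairing it with the
 * current process's root mount; returns the d_path() result, which may
 * be an ERR_PTR that callers must check.
 */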
2386static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
2387{
2388 char *path = NULL;
2389
2390 struct path p;
2391
2392 p.dentry = dentry;
2393 p.mnt = current->fs->root.mnt;
2394 path_get(&p);
2395 path = d_path(&p, buf, bufsize);
2396 path_put(&p);
2397
2398 return path;
2399}
2400
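/*
 * Warn that a dirty page is being discarded and the file may be
 * corrupted.  The path buffer is allocated with GFP_ATOMIC because this
 * can run under a spinlock, and a missing path only degrades the
 * message rather than suppressing it.
 */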
2401void ll_dirty_page_discard_warn(struct page *page, int ioret)
2402{
2403 char *buf, *path = NULL;
2404 struct dentry *dentry = NULL;
2405 struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
2406
2407	/* This can be called while holding a spinlock, so use GFP_ATOMIC. */
2408 buf = (char *)__get_free_page(GFP_ATOMIC);
2409 if (buf != NULL) {
2410 dentry = d_find_alias(page->mapping->host);
2411 if (dentry != NULL)
2412 path = ll_d_path(dentry, buf, PAGE_SIZE);
2413 }
2414
2415 CWARN("%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
2416 "(rc %d)\n", ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
2417 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
2418 PFID(&obj->cob_header.coh_lu.loh_fid),
2419 (path && !IS_ERR(path)) ? path : "", ioret);
2420
2421 if (dentry != NULL)
2422 dput(dentry);
2423
2424 if (buf != NULL)
2425 free_page((unsigned long)buf);
2426}