staging/lustre/obdclass: be more careful processing server name
[linux-2.6-block.git] / drivers / staging / lustre / lustre / llite / llite_lib.c
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/llite/llite_lib.c
37 *
38 * Lustre Light Super operations
39 */
40
41#define DEBUG_SUBSYSTEM S_LLITE
42
43#include <linux/module.h>
44#include <linux/types.h>
45#include <linux/version.h>
46#include <linux/mm.h>
47
48#include <lustre_lite.h>
49#include <lustre_ha.h>
50#include <lustre_dlm.h>
51#include <lprocfs_status.h>
52#include <lustre_disk.h>
53#include <lustre_param.h>
54#include <lustre_log.h>
55#include <cl_object.h>
56#include <obd_cksum.h>
57#include "llite_internal.h"
58
59struct kmem_cache *ll_file_data_slab;
60
61LIST_HEAD(ll_super_blocks);
62DEFINE_SPINLOCK(ll_sb_lock);
63
64#ifndef MS_HAS_NEW_AOPS
65extern struct address_space_operations ll_aops;
66#else
67extern struct address_space_operations_ext ll_aops;
68#endif
69
70#ifndef log2
71#define log2(n) ffz(~(n))
72#endif
73
74static struct ll_sb_info *ll_init_sbi(void)
75{
76 struct ll_sb_info *sbi = NULL;
77 unsigned long pages;
78 unsigned long lru_page_max;
79 struct sysinfo si;
80 class_uuid_t uuid;
81 int i;
82 ENTRY;
83
84 OBD_ALLOC(sbi, sizeof(*sbi));
85 if (!sbi)
86 RETURN(NULL);
87
88 spin_lock_init(&sbi->ll_lock);
89 mutex_init(&sbi->ll_lco.lco_lock);
90 spin_lock_init(&sbi->ll_pp_extent_lock);
91 spin_lock_init(&sbi->ll_process_lock);
92 sbi->ll_rw_stats_on = 0;
93
94 si_meminfo(&si);
95 pages = si.totalram - si.totalhigh;
96 if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) {
97 lru_page_max = pages / 2;
98 } else {
99 lru_page_max = (pages / 4) * 3;
100 }
101
102 /* initialize lru data */
103 atomic_set(&sbi->ll_cache.ccc_users, 0);
104 sbi->ll_cache.ccc_lru_max = lru_page_max;
105 atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
106 spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
107 INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
108
109 sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
110 SBI_DEFAULT_READAHEAD_MAX);
111 sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
112 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
113 SBI_DEFAULT_READAHEAD_WHOLE_MAX;
114 INIT_LIST_HEAD(&sbi->ll_conn_chain);
115 INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
116
117 ll_generate_random_uuid(uuid);
118 class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
119 CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
120
121 spin_lock(&ll_sb_lock);
122 list_add_tail(&sbi->ll_list, &ll_super_blocks);
123 spin_unlock(&ll_sb_lock);
124
125 sbi->ll_flags |= LL_SBI_VERBOSE;
126 sbi->ll_flags |= LL_SBI_CHECKSUM;
127
128 sbi->ll_flags |= LL_SBI_LRU_RESIZE;
129
130 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
131 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
132 pp_r_hist.oh_lock);
133 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
134 pp_w_hist.oh_lock);
135 }
136
137 /* metadata statahead is enabled by default */
138 sbi->ll_sa_max = LL_SA_RPC_DEF;
139 atomic_set(&sbi->ll_sa_total, 0);
140 atomic_set(&sbi->ll_sa_wrong, 0);
141 atomic_set(&sbi->ll_agl_total, 0);
142 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
143
144 RETURN(sbi);
145}
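
/*
 * Hedged sketch, not part of the original file: the LRU cap rule used in
 * ll_init_sbi() above, restated on its own.  With 4 KiB pages
 * (PAGE_CACHE_SHIFT == 12), "pages >> (20 - PAGE_CACHE_SHIFT)" converts a
 * page count into MiB, so hosts with under 512 MiB of low memory cap the
 * per-sb LRU at half of it and larger hosts at three quarters.  The helper
 * name is made up for illustration.
 */
static inline unsigned long ll_lru_page_max_sketch(unsigned long pages)
{
	if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
		return pages / 2;	/* small host: cap LRU at 50% */
	return (pages / 4) * 3;		/* large host: cap LRU at 75% */
}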
146
147void ll_free_sbi(struct super_block *sb)
148{
149 struct ll_sb_info *sbi = ll_s2sbi(sb);
150 ENTRY;
151
152 if (sbi != NULL) {
153 spin_lock(&ll_sb_lock);
154 list_del(&sbi->ll_list);
155 spin_unlock(&ll_sb_lock);
156 OBD_FREE(sbi, sizeof(*sbi));
157 }
158 EXIT;
159}
160
161static struct dentry_operations ll_d_root_ops = {
162 .d_compare = ll_dcompare,
163 .d_revalidate = ll_revalidate_nd,
164};
165
166static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
167 struct vfsmount *mnt)
168{
169 struct inode *root = 0;
170 struct ll_sb_info *sbi = ll_s2sbi(sb);
171 struct obd_device *obd;
172 struct obd_capa *oc = NULL;
173 struct obd_statfs *osfs = NULL;
174 struct ptlrpc_request *request = NULL;
175 struct obd_connect_data *data = NULL;
176 struct obd_uuid *uuid;
177 struct md_op_data *op_data;
178 struct lustre_md lmd;
179 obd_valid valid;
180 int size, err, checksum;
181 ENTRY;
182
183 obd = class_name2obd(md);
184 if (!obd) {
185 CERROR("MD %s: not setup or attached\n", md);
186 RETURN(-EINVAL);
187 }
188
189 OBD_ALLOC_PTR(data);
190 if (data == NULL)
191 RETURN(-ENOMEM);
192
193 OBD_ALLOC_PTR(osfs);
194 if (osfs == NULL) {
195 OBD_FREE_PTR(data);
196 RETURN(-ENOMEM);
197 }
198
199 if (proc_lustre_fs_root) {
200 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
201 dt, md);
202 if (err < 0)
203 CERROR("could not register mount in /proc/fs/lustre\n");
204 }
205
206 /* indicate the features supported by this client */
207 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
208 OBD_CONNECT_ATTRFID |
209 OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
210 OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
211 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
212 OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
213 OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
214 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
215 OBD_CONNECT_EINPROGRESS |
216 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
217 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
218
219 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
220 data->ocd_connect_flags |= OBD_CONNECT_SOM;
221
222 if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
223 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
224#ifdef CONFIG_FS_POSIX_ACL
225 data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
226#endif
227
228 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
229 /* flag mdc connection as lightweight; only used for test
230 * purposes, use with care */
231 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
232
233 data->ocd_ibits_known = MDS_INODELOCK_FULL;
234 data->ocd_version = LUSTRE_VERSION_CODE;
235
236 if (sb->s_flags & MS_RDONLY)
237 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
238 if (sbi->ll_flags & LL_SBI_USER_XATTR)
239 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
240
241#ifdef HAVE_MS_FLOCK_LOCK
242 /* force vfs to use lustre handler for flock() calls - bug 10743 */
243 sb->s_flags |= MS_FLOCK_LOCK;
244#endif
245#ifdef MS_HAS_NEW_AOPS
246 sb->s_flags |= MS_HAS_NEW_AOPS;
247#endif
248
249 if (sbi->ll_flags & LL_SBI_FLOCK)
250 sbi->ll_fop = &ll_file_operations_flock;
251 else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
252 sbi->ll_fop = &ll_file_operations;
253 else
254 sbi->ll_fop = &ll_file_operations_noflock;
255
256 /* real client */
257 data->ocd_connect_flags |= OBD_CONNECT_REAL;
258 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
259 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
260
261 data->ocd_brw_size = MD_MAX_BRW_SIZE;
262
263 err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
264 if (err == -EBUSY) {
265 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
266 "recovery, of which this client is not a "
267 "part. Please wait for recovery to complete,"
268 " abort, or time out.\n", md);
269 GOTO(out, err);
270 } else if (err) {
271 CERROR("cannot connect to %s: rc = %d\n", md, err);
272 GOTO(out, err);
273 }
274
275 sbi->ll_md_exp->exp_connect_data = *data;
276
277 err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
278 LUSTRE_SEQ_METADATA);
279 if (err) {
280 CERROR("%s: Can't init metadata layer FID infrastructure, "
281 "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
282 GOTO(out_md, err);
283 }
284
285 /* For mount we only need fs info from MDT0, and in DNE this also
286 * ensures the client can be mounted as long as MDT0 is
287 * available */
288 err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
289 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
290 OBD_STATFS_FOR_MDT0);
291 if (err)
292 GOTO(out_md_fid, err);
293
294 /* This needs to be after statfs to ensure connect has finished.
295 * Note that "data" does NOT contain the valid connect reply.
296 * If connecting to a 1.8 server there will be no LMV device, so
297 * we can access the MDC export directly and exp_connect_flags will
298 * be non-zero, but if accessing an upgraded 2.1 server it will
299 * have the correct flags filled in.
300 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
301 valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
302 if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
303 valid != CLIENT_CONNECT_MDT_REQD) {
304 char *buf;
305
306 OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE);
307 obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
308 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
309 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
310 "feature(s) needed for correct operation "
311 "of this client (%s). Please upgrade "
312 "server or downgrade client.\n",
313 sbi->ll_md_exp->exp_obd->obd_name, buf);
314 OBD_FREE(buf, PAGE_CACHE_SIZE);
315 GOTO(out_md_fid, err = -EPROTO);
316 }
317
318 size = sizeof(*data);
319 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
320 KEY_CONN_DATA, &size, data, NULL);
321 if (err) {
322 CERROR("%s: Get connect data failed: rc = %d\n",
323 sbi->ll_md_exp->exp_obd->obd_name, err);
324 GOTO(out_md_fid, err);
325 }
326
327 LASSERT(osfs->os_bsize);
328 sb->s_blocksize = osfs->os_bsize;
329 sb->s_blocksize_bits = log2(osfs->os_bsize);
330 sb->s_magic = LL_SUPER_MAGIC;
331 sb->s_maxbytes = MAX_LFS_FILESIZE;
332 sbi->ll_namelen = osfs->os_namelen;
333 sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
334
335 if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
336 !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
337 LCONSOLE_INFO("Disabling user_xattr feature because "
338 "it is not supported on the server\n");
339 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
340 }
341
342 if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
343#ifdef MS_POSIXACL
344 sb->s_flags |= MS_POSIXACL;
345#endif
346 sbi->ll_flags |= LL_SBI_ACL;
347 } else {
348 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
349#ifdef MS_POSIXACL
350 sb->s_flags &= ~MS_POSIXACL;
351#endif
352 sbi->ll_flags &= ~LL_SBI_ACL;
353 }
354
355 if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
356 if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
357 sbi->ll_flags |= LL_SBI_RMT_CLIENT;
358 LCONSOLE_INFO("client is set as remote by default.\n");
359 }
360 } else {
361 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
362 sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
363 LCONSOLE_INFO("client claims to be remote, but server "
364 "rejected, forced to be local.\n");
365 }
366 }
367
368 if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
369 LCONSOLE_INFO("client enabled MDS capability!\n");
370 sbi->ll_flags |= LL_SBI_MDS_CAPA;
371 }
372
373 if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
374 LCONSOLE_INFO("client enabled OSS capability!\n");
375 sbi->ll_flags |= LL_SBI_OSS_CAPA;
376 }
377
378 if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
379 sbi->ll_flags |= LL_SBI_64BIT_HASH;
380
381 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
382 sbi->ll_md_brw_size = data->ocd_brw_size;
383 else
384 sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
385
386 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
387 LCONSOLE_INFO("Layout lock feature supported.\n");
388 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
389 }
390
391 obd = class_name2obd(dt);
392 if (!obd) {
393 CERROR("DT %s: not setup or attached\n", dt);
394 GOTO(out_md_fid, err = -ENODEV);
395 }
396
397 data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
398 OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
399 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
400 OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
401 OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
402 OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
403 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
404 OBD_CONNECT_MAXBYTES |
405 OBD_CONNECT_EINPROGRESS |
406 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
407 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
408
409 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
410 data->ocd_connect_flags |= OBD_CONNECT_SOM;
411
412 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
413 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
414 * disabled by default, because it can still be enabled on the
415 * fly via /proc. As a consequence, we still need to come to an
416 * agreement on the supported algorithms at connect time */
417 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
418
419 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
420 data->ocd_cksum_types = OBD_CKSUM_ADLER;
421 else
422 data->ocd_cksum_types = cksum_types_supported_client();
423 }
424
425 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
426 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
427 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
428
429 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
430 "ocd_grant: %d\n", data->ocd_connect_flags,
431 data->ocd_version, data->ocd_grant);
432
433 obd->obd_upcall.onu_owner = &sbi->ll_lco;
434 obd->obd_upcall.onu_upcall = cl_ocd_update;
435
436 data->ocd_brw_size = DT_MAX_BRW_SIZE;
437
438 err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
439 NULL);
440 if (err == -EBUSY) {
441 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
442 "recovery, of which this client is not a "
443 "part. Please wait for recovery to "
444 "complete, abort, or time out.\n", dt);
445 GOTO(out_md, err);
446 } else if (err) {
447 CERROR("%s: Cannot connect to %s: rc = %d\n",
448 sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
449 GOTO(out_md, err);
450 }
451
452 sbi->ll_dt_exp->exp_connect_data = *data;
453
454 err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
455 LUSTRE_SEQ_METADATA);
456 if (err) {
457 CERROR("%s: Can't init data layer FID infrastructure, "
458 "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
459 GOTO(out_dt, err);
460 }
461
462 mutex_lock(&sbi->ll_lco.lco_lock);
463 sbi->ll_lco.lco_flags = data->ocd_connect_flags;
464 sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
465 sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
466 mutex_unlock(&sbi->ll_lco.lco_lock);
467
468 fid_zero(&sbi->ll_root_fid);
469 err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
470 if (err) {
471 CERROR("cannot mds_connect: rc = %d\n", err);
472 GOTO(out_lock_cn_cb, err);
473 }
474 if (!fid_is_sane(&sbi->ll_root_fid)) {
475 CERROR("%s: Invalid root fid "DFID" during mount\n",
476 sbi->ll_md_exp->exp_obd->obd_name,
477 PFID(&sbi->ll_root_fid));
478 GOTO(out_lock_cn_cb, err = -EINVAL);
479 }
480 CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
481
482 sb->s_op = &lustre_super_operations;
483#if THREAD_SIZE >= 8192 /*b=17630*/
484 sb->s_export_op = &lustre_export_operations;
485#endif
486
487 /* make root inode
488 * XXX: move this to after cbd setup? */
489 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
490 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
491 valid |= OBD_MD_FLRMTPERM;
492 else if (sbi->ll_flags & LL_SBI_ACL)
493 valid |= OBD_MD_FLACL;
494
495 OBD_ALLOC_PTR(op_data);
496 if (op_data == NULL)
497 GOTO(out_lock_cn_cb, err = -ENOMEM);
498
499 op_data->op_fid1 = sbi->ll_root_fid;
500 op_data->op_mode = 0;
501 op_data->op_capa1 = oc;
502 op_data->op_valid = valid;
503
504 err = md_getattr(sbi->ll_md_exp, op_data, &request);
505 if (oc)
506 capa_put(oc);
507 OBD_FREE_PTR(op_data);
508 if (err) {
509 CERROR("%s: md_getattr failed for root: rc = %d\n",
510 sbi->ll_md_exp->exp_obd->obd_name, err);
511 GOTO(out_lock_cn_cb, err);
512 }
513
514 err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
515 sbi->ll_md_exp, &lmd);
516 if (err) {
517 CERROR("failed to understand root inode md: rc = %d\n", err);
518 ptlrpc_req_finished(request);
519 GOTO(out_lock_cn_cb, err);
520 }
521
522 LASSERT(fid_is_sane(&sbi->ll_root_fid));
523 root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
524 sbi->ll_flags & LL_SBI_32BIT_API),
525 &lmd);
526 md_free_lustre_md(sbi->ll_md_exp, &lmd);
527 ptlrpc_req_finished(request);
528
529 if (root == NULL || IS_ERR(root)) {
530 if (lmd.lsm)
531 obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
532#ifdef CONFIG_FS_POSIX_ACL
533 if (lmd.posix_acl) {
534 posix_acl_release(lmd.posix_acl);
535 lmd.posix_acl = NULL;
536 }
537#endif
538 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
539 root = NULL;
540 CERROR("lustre_lite: bad iget4 for root\n");
541 GOTO(out_root, err);
542 }
543
544 err = ll_close_thread_start(&sbi->ll_lcq);
545 if (err) {
546 CERROR("cannot start close thread: rc %d\n", err);
547 GOTO(out_root, err);
548 }
549
550#ifdef CONFIG_FS_POSIX_ACL
551 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
552 rct_init(&sbi->ll_rct);
553 et_init(&sbi->ll_et);
554 }
555#endif
556
557 checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
558 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
559 KEY_CHECKSUM, sizeof(checksum), &checksum,
560 NULL);
561 cl_sb_init(sb);
562
563 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
564 KEY_CACHE_SET, sizeof(sbi->ll_cache),
565 &sbi->ll_cache, NULL);
566
567 sb->s_root = d_make_root(root);
568 if (sb->s_root == NULL) {
569 CERROR("%s: can't make root dentry\n",
570 ll_get_fsname(sb, NULL, 0));
571 GOTO(out_root, err = -ENOMEM);
572 }
573
574 /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
575 d_set_d_op(sb->s_root, &ll_d_root_ops);
576 sb->s_d_op = &ll_d_ops;
577
578 sbi->ll_sdev_orig = sb->s_dev;
579
580 /* We set sb->s_dev equal on all lustre clients in order to support
581 * NFS export clustering. NFSD requires that the FSID be the same
582 * on all clients. */
583 /* s_dev is also used in lt_compare() to compare two fs, but that is
584 * only a node-local comparison. */
585 uuid = obd_get_uuid(sbi->ll_md_exp);
586 if (uuid != NULL)
587 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
588
589 if (data != NULL)
590 OBD_FREE_PTR(data);
591 if (osfs != NULL)
592 OBD_FREE_PTR(osfs);
593
594 RETURN(err);
595out_root:
596 if (root)
597 iput(root);
598out_lock_cn_cb:
599 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
600out_dt:
601 obd_disconnect(sbi->ll_dt_exp);
602 sbi->ll_dt_exp = NULL;
603 /* Make sure all OSCs are gone, since cl_cache is accessing sbi. */
604 obd_zombie_barrier();
605out_md_fid:
606 obd_fid_fini(sbi->ll_md_exp->exp_obd);
607out_md:
608 obd_disconnect(sbi->ll_md_exp);
609 sbi->ll_md_exp = NULL;
610out:
611 if (data != NULL)
612 OBD_FREE_PTR(data);
613 if (osfs != NULL)
614 OBD_FREE_PTR(osfs);
615 lprocfs_unregister_mountpoint(sbi);
616 return err;
617}
618
619int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
620{
621 int size, rc;
622
623 *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
624 size = sizeof(int);
625 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
626 KEY_MAX_EASIZE, &size, lmmsize, NULL);
627 if (rc)
628 CERROR("Get max mdsize error rc %d \n", rc);
629
630 RETURN(rc);
631}
632
633void ll_dump_inode(struct inode *inode)
634{
635 struct ll_d_hlist_node *tmp;
636 int dentry_count = 0;
637
638 LASSERT(inode != NULL);
639
640 ll_d_hlist_for_each(tmp, &inode->i_dentry)
641 dentry_count++;
642
643 CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
644 inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
645 inode->i_mode, atomic_read(&inode->i_count), dentry_count);
646}
647
648void lustre_dump_dentry(struct dentry *dentry, int recur)
649{
650 struct list_head *tmp;
651 int subdirs = 0;
652
653 LASSERT(dentry != NULL);
654
655 list_for_each(tmp, &dentry->d_subdirs)
656 subdirs++;
657
658 CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
659 " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
660 dentry->d_name.len, dentry->d_name.name,
661 dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
662 dentry->d_parent, dentry->d_inode, d_count(dentry),
663 dentry->d_flags, dentry->d_fsdata, subdirs);
664 if (dentry->d_inode != NULL)
665 ll_dump_inode(dentry->d_inode);
666
667 if (recur == 0)
668 return;
669
670 list_for_each(tmp, &dentry->d_subdirs) {
671 struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
672 lustre_dump_dentry(d, recur - 1);
673 }
674}
675
676void client_common_put_super(struct super_block *sb)
677{
678 struct ll_sb_info *sbi = ll_s2sbi(sb);
679 ENTRY;
680
681#ifdef CONFIG_FS_POSIX_ACL
682 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
683 et_fini(&sbi->ll_et);
684 rct_fini(&sbi->ll_rct);
685 }
686#endif
687
688 ll_close_thread_shutdown(sbi->ll_lcq);
689
690 cl_sb_fini(sb);
691
692 list_del(&sbi->ll_conn_chain);
693
694 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
695 obd_disconnect(sbi->ll_dt_exp);
696 sbi->ll_dt_exp = NULL;
697 /* wait till all OSCs are gone, since cl_cache is accessing sbi.
698 * see LU-2543. */
699 obd_zombie_barrier();
700
701 lprocfs_unregister_mountpoint(sbi);
702
703 obd_fid_fini(sbi->ll_md_exp->exp_obd);
704 obd_disconnect(sbi->ll_md_exp);
705 sbi->ll_md_exp = NULL;
706
707 EXIT;
708}
709
710void ll_kill_super(struct super_block *sb)
711{
712 struct ll_sb_info *sbi;
713
714 ENTRY;
715
716 /* sb not initialized? */
717 if (!(sb->s_flags & MS_ACTIVE))
718 return;
719
720 sbi = ll_s2sbi(sb);
721 /* we need to restore the original s_dev (changed for clustered NFS)
722 * before put_super, because newer kernels cache s_dev and changing
723 * sb->s_dev in put_super does not affect the real device removal */
724 if (sbi) {
725 sb->s_dev = sbi->ll_sdev_orig;
726 sbi->ll_umounting = 1;
727 }
728 EXIT;
729}
730
731char *ll_read_opt(const char *opt, char *data)
732{
733 char *value;
734 char *retval;
735 ENTRY;
736
737 CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
738 if (strncmp(opt, data, strlen(opt)))
739 RETURN(NULL);
740 if ((value = strchr(data, '=')) == NULL)
741 RETURN(NULL);
742
743 value++;
744 OBD_ALLOC(retval, strlen(value) + 1);
745 if (!retval) {
746 CERROR("out of memory!\n");
747 RETURN(NULL);
748 }
749
750 memcpy(retval, value, strlen(value)+1);
751 CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
752 RETURN(retval);
753}
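
/*
 * Hedged usage sketch, not part of the original file: ll_read_opt() returns
 * an OBD_ALLOC'd copy of everything after '=' when "opt" prefixes "data".
 * The option string and value below are invented for the example; the
 * caller is responsible for freeing the returned buffer.
 */
static void ll_read_opt_example(void)
{
	char data[] = "osc=lustre-OST0000-osc";
	char *val = ll_read_opt("osc", data);	/* -> "lustre-OST0000-osc" */

	if (val)
		OBD_FREE(val, strlen(val) + 1);
}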
754
755static inline int ll_set_opt(const char *opt, char *data, int fl)
756{
757 if (strncmp(opt, data, strlen(opt)) != 0)
758 return(0);
759 else
760 return(fl);
761}
762
763/* non-client-specific mount options are parsed in lmd_parse */
764static int ll_options(char *options, int *flags)
765{
766 int tmp;
767 char *s1 = options, *s2;
768 ENTRY;
769
770 if (!options)
771 RETURN(0);
772
773 CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
774
775 while (*s1) {
776 CDEBUG(D_SUPER, "next opt=%s\n", s1);
777 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
778 if (tmp) {
779 *flags |= tmp;
780 goto next;
781 }
782 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
783 if (tmp) {
784 *flags |= tmp;
785 goto next;
786 }
787 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
788 if (tmp) {
789 *flags |= tmp;
790 goto next;
791 }
792 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
793 if (tmp) {
794 *flags &= ~tmp;
795 goto next;
796 }
797 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
798 if (tmp) {
799 *flags |= tmp;
800 goto next;
801 }
802 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
803 if (tmp) {
804 *flags &= ~tmp;
805 goto next;
806 }
807#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 5, 50, 0)
808 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
809 if (tmp) {
810 /* Ignore deprecated mount option. The client will
811 * always try to mount with ACL support, whether this
812 * is used depends on whether server supports it. */
813 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
814 "mount option 'acl'.\n");
815 goto next;
816 }
817 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
818 if (tmp) {
819 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
820 "mount option 'noacl'.\n");
821 goto next;
822 }
823#else
824#warning "{no}acl options have been deprecated since 1.8, please remove them"
825#endif
826 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
827 if (tmp) {
828 *flags |= tmp;
829 goto next;
830 }
831 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
832 if (tmp) {
833 *flags |= tmp;
834 goto next;
835 }
836 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
837 if (tmp) {
838 *flags &= ~tmp;
839 goto next;
840 }
841
842 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
843 if (tmp) {
844 *flags |= tmp;
845 goto next;
846 }
847 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
848 if (tmp) {
849 *flags &= ~tmp;
850 goto next;
851 }
852 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
853 if (tmp) {
854 *flags |= tmp;
855 goto next;
856 }
857 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
858 if (tmp) {
859 *flags &= ~tmp;
860 goto next;
861 }
862 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
863 if (tmp) {
864 *flags |= tmp;
865 goto next;
866 }
867 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
868 if (tmp) {
869 *flags &= ~tmp;
870 goto next;
871 }
872 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
873 if (tmp) {
874 *flags |= tmp;
875 goto next;
876 }
877 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
878 if (tmp) {
879 *flags |= tmp;
880 goto next;
881 }
882 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
883 if (tmp) {
884 *flags |= tmp;
885 goto next;
886 }
887 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
888 if (tmp) {
889 *flags &= ~tmp;
890 goto next;
891 }
892 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
893 s1);
894 RETURN(-EINVAL);
895
896next:
897 /* Find next opt */
898 s2 = strchr(s1, ',');
899 if (s2 == NULL)
900 break;
901 s1 = s2 + 1;
902 }
903 RETURN(0);
904}
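
/*
 * Hedged usage sketch, not part of the original file: ll_options() walks a
 * comma-separated mount option string and folds each keyword into the
 * LL_SBI_* flag word ("no" variants clear their flag); an unrecognized
 * keyword fails the mount with -EINVAL.  The option string below is made up.
 */
static int ll_options_example(void)
{
	int flags = 0;
	char opts[] = "flock,nouser_xattr,verbose";

	/* sets LL_SBI_FLOCK and LL_SBI_VERBOSE, clears LL_SBI_USER_XATTR */
	return ll_options(opts, &flags);
}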
905
906void ll_lli_init(struct ll_inode_info *lli)
907{
908 lli->lli_inode_magic = LLI_INODE_MAGIC;
909 lli->lli_flags = 0;
910 lli->lli_ioepoch = 0;
911 lli->lli_maxbytes = MAX_LFS_FILESIZE;
912 spin_lock_init(&lli->lli_lock);
913 lli->lli_posix_acl = NULL;
914 lli->lli_remote_perms = NULL;
915 mutex_init(&lli->lli_rmtperm_mutex);
916 /* Do not set lli_fid, it has been initialized already. */
917 fid_zero(&lli->lli_pfid);
918 INIT_LIST_HEAD(&lli->lli_close_list);
919 INIT_LIST_HEAD(&lli->lli_oss_capas);
920 atomic_set(&lli->lli_open_count, 0);
921 lli->lli_mds_capa = NULL;
922 lli->lli_rmtperm_time = 0;
923 lli->lli_pending_och = NULL;
924 lli->lli_mds_read_och = NULL;
925 lli->lli_mds_write_och = NULL;
926 lli->lli_mds_exec_och = NULL;
927 lli->lli_open_fd_read_count = 0;
928 lli->lli_open_fd_write_count = 0;
929 lli->lli_open_fd_exec_count = 0;
930 mutex_init(&lli->lli_och_mutex);
931 spin_lock_init(&lli->lli_agl_lock);
932 lli->lli_has_smd = false;
933 lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
934 lli->lli_clob = NULL;
935
936 LASSERT(lli->lli_vfs_inode.i_mode != 0);
937 if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
938 mutex_init(&lli->lli_readdir_mutex);
939 lli->lli_opendir_key = NULL;
940 lli->lli_sai = NULL;
941 lli->lli_def_acl = NULL;
942 spin_lock_init(&lli->lli_sa_lock);
943 lli->lli_opendir_pid = 0;
944 } else {
945 sema_init(&lli->lli_size_sem, 1);
946 lli->lli_size_sem_owner = NULL;
947 lli->lli_symlink_name = NULL;
948 init_rwsem(&lli->lli_trunc_sem);
949 mutex_init(&lli->lli_write_mutex);
950 init_rwsem(&lli->lli_glimpse_sem);
951 lli->lli_glimpse_time = 0;
952 INIT_LIST_HEAD(&lli->lli_agl_list);
953 lli->lli_agl_index = 0;
954 lli->lli_async_rc = 0;
955 lli->lli_volatile = false;
956 }
957 mutex_init(&lli->lli_layout_mutex);
958}
959
960static inline int ll_bdi_register(struct backing_dev_info *bdi)
961{
962 static atomic_t ll_bdi_num = ATOMIC_INIT(0);
963
964 bdi->name = "lustre";
965 return bdi_register(bdi, NULL, "lustre-%d",
966 atomic_inc_return(&ll_bdi_num));
967}
968
969int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
970{
971 struct lustre_profile *lprof = NULL;
972 struct lustre_sb_info *lsi = s2lsi(sb);
973 struct ll_sb_info *sbi;
974 char *dt = NULL, *md = NULL;
975 char *profilenm = get_profile_name(sb);
976 struct config_llog_instance *cfg;
977 /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
978 const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
979 int err;
980 ENTRY;
981
982 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
983
984 OBD_ALLOC_PTR(cfg);
985 if (cfg == NULL)
986 RETURN(-ENOMEM);
987
988 try_module_get(THIS_MODULE);
989
990 /* client additional sb info */
991 lsi->lsi_llsbi = sbi = ll_init_sbi();
992 if (!sbi) {
993 module_put(THIS_MODULE);
994 OBD_FREE_PTR(cfg);
995 RETURN(-ENOMEM);
996 }
997
998 err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
999 if (err)
1000 GOTO(out_free, err);
1001
1002 err = bdi_init(&lsi->lsi_bdi);
1003 if (err)
1004 GOTO(out_free, err);
1005 lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1006 lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1007 err = ll_bdi_register(&lsi->lsi_bdi);
1008 if (err)
1009 GOTO(out_free, err);
1010
1011 sb->s_bdi = &lsi->lsi_bdi;
1012
1013 /* Generate a string unique to this super, in case some joker tries
1014 to mount the same fs at two mount points.
1015 Use the address of the super itself.*/
1016 cfg->cfg_instance = sb;
1017 cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1018 cfg->cfg_callback = class_config_llog_handler;
1019 /* set up client obds */
1020 err = lustre_process_log(sb, profilenm, cfg);
1021 if (err < 0) {
1022 CERROR("Unable to process log: %d\n", err);
1023 GOTO(out_free, err);
1024 }
1025
1026 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1027 lprof = class_get_profile(profilenm);
1028 if (lprof == NULL) {
1029 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1030 " read from the MGS. Does that filesystem "
1031 "exist?\n", profilenm);
1032 GOTO(out_free, err = -EINVAL);
1033 }
1034 CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1035 lprof->lp_md, lprof->lp_dt);
1036
1037 OBD_ALLOC(dt, strlen(lprof->lp_dt) + instlen + 2);
1038 if (!dt)
1039 GOTO(out_free, err = -ENOMEM);
1040 sprintf(dt, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
1041
1042 OBD_ALLOC(md, strlen(lprof->lp_md) + instlen + 2);
1043 if (!md)
1044 GOTO(out_free, err = -ENOMEM);
1045 sprintf(md, "%s-%p", lprof->lp_md, cfg->cfg_instance);
1046
1047 /* connections, registrations, sb setup */
1048 err = client_common_fill_super(sb, md, dt, mnt);
1049
1050out_free:
1051 if (md)
1052 OBD_FREE(md, strlen(lprof->lp_md) + instlen + 2);
1053 if (dt)
1054 OBD_FREE(dt, strlen(lprof->lp_dt) + instlen + 2);
1055 if (err)
1056 ll_put_super(sb);
1057 else if (sbi->ll_flags & LL_SBI_VERBOSE)
1058 LCONSOLE_WARN("Mounted %s\n", profilenm);
1059
1060 OBD_FREE_PTR(cfg);
1061 RETURN(err);
1062} /* ll_fill_super */
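
/*
 * Hedged sketch, not part of the original file: ll_fill_super() derives the
 * local md/dt device names by appending the cfg_instance pointer to the
 * profile names, mirroring the sprintf("%s-%p", ...) calls above, so that
 * two mounts of the same filesystem get distinct obd names.  The helper
 * name is made up; lp_dt/lp_md come from struct lustre_profile.
 */
static void ll_obd_name_sketch(struct lustre_profile *lprof, void *instance,
			       char *buf, size_t len)
{
	snprintf(buf, len, "%s-%p", lprof->lp_dt, instance);
}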
1063
1064void ll_put_super(struct super_block *sb)
1065{
1066 struct config_llog_instance cfg;
1067 struct obd_device *obd;
1068 struct lustre_sb_info *lsi = s2lsi(sb);
1069 struct ll_sb_info *sbi = ll_s2sbi(sb);
1070 char *profilenm = get_profile_name(sb);
1071 int next, force = 1;
1072 ENTRY;
1073
1074 CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
1075
1076 ll_print_capa_stat(sbi);
1077
1078 cfg.cfg_instance = sb;
1079 lustre_end_log(sb, profilenm, &cfg);
1080
1081 if (sbi->ll_md_exp) {
1082 obd = class_exp2obd(sbi->ll_md_exp);
1083 if (obd)
1084 force = obd->obd_force;
1085 }
1086
1087 /* We need to set force before the lov_disconnect in
1088 lustre_common_put_super, since l_d cleans up osc's as well. */
1089 if (force) {
1090 next = 0;
1091 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1092 &next)) != NULL) {
1093 obd->obd_force = force;
1094 }
1095 }
1096
1097 if (sbi->ll_lcq) {
1098 /* Only if client_common_fill_super succeeded */
1099 client_common_put_super(sb);
1100 }
1101
1102 next = 0;
1103 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) !=NULL) {
1104 class_manual_cleanup(obd);
1105 }
1106
1107 if (sbi->ll_flags & LL_SBI_VERBOSE)
1108 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1109
1110 if (profilenm)
1111 class_del_profile(profilenm);
1112
1113 if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1114 bdi_destroy(&lsi->lsi_bdi);
1115 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1116 }
1117
1118 ll_free_sbi(sb);
1119 lsi->lsi_llsbi = NULL;
1120
1121 lustre_common_put_super(sb);
1122
1123 module_put(THIS_MODULE);
1124
1125 EXIT;
1126} /* client_put_super */
1127
1128struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1129{
1130 struct inode *inode = NULL;
1131
1132 /* NOTE: we depend on atomic igrab() -bzzz */
1133 lock_res_and_lock(lock);
1134 if (lock->l_resource->lr_lvb_inode) {
1135 struct ll_inode_info * lli;
1136 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1137 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1138 inode = igrab(lock->l_resource->lr_lvb_inode);
1139 } else {
1140 inode = lock->l_resource->lr_lvb_inode;
1141 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1142 D_WARNING, lock, "lr_lvb_inode %p is "
1143 "bogus: magic %08x",
1144 lock->l_resource->lr_lvb_inode,
1145 lli->lli_inode_magic);
1146 inode = NULL;
1147 }
1148 }
1149 unlock_res_and_lock(lock);
1150 return inode;
1151}
1152
1153struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
1154{
1155 struct inode *inode = NULL;
1156 /* NOTE: we depend on atomic igrab() -bzzz */
1157 lock_res_and_lock(lock);
1158 if (lock->l_ast_data) {
1159 struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
1160 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1161 inode = igrab(lock->l_ast_data);
1162 } else {
1163 inode = lock->l_ast_data;
1164 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1165 D_WARNING, lock, "l_ast_data %p is "
1166 "bogus: magic %08x", lock->l_ast_data,
1167 lli->lli_inode_magic);
1168 inode = NULL;
1169 }
1170 }
1171 unlock_res_and_lock(lock);
1172 return inode;
1173}
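
/*
 * Hedged sketch, not part of the original file: both helpers above apply
 * the same guard - under the resource lock, trust the cached inode pointer
 * only if lli_inode_magic still reads LLI_INODE_MAGIC, then pin it with
 * igrab().  The helper name is made up and the resource lock is assumed to
 * be held by the caller.
 */
static struct inode *ll_igrab_checked_sketch(struct inode *candidate)
{
	struct ll_inode_info *lli = ll_i2info(candidate);

	if (lli->lli_inode_magic != LLI_INODE_MAGIC)
		return NULL;		/* stale or bogus pointer */
	return igrab(candidate);	/* may be NULL if inode is freeing */
}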
1174
1175void ll_clear_inode(struct inode *inode)
1176{
1177 struct ll_inode_info *lli = ll_i2info(inode);
1178 struct ll_sb_info *sbi = ll_i2sbi(inode);
1179 ENTRY;
1180
1181 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1182 inode->i_generation, inode);
1183
1184 if (S_ISDIR(inode->i_mode)) {
1185 /* these should have been cleared in ll_file_release */
1186 LASSERT(lli->lli_opendir_key == NULL);
1187 LASSERT(lli->lli_sai == NULL);
1188 LASSERT(lli->lli_opendir_pid == 0);
1189 }
1190
1191 ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1192 md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1193
1194 LASSERT(!lli->lli_open_fd_write_count);
1195 LASSERT(!lli->lli_open_fd_read_count);
1196 LASSERT(!lli->lli_open_fd_exec_count);
1197
1198 if (lli->lli_mds_write_och)
1199 ll_md_real_close(inode, FMODE_WRITE);
1200 if (lli->lli_mds_exec_och)
1201 ll_md_real_close(inode, FMODE_EXEC);
1202 if (lli->lli_mds_read_och)
1203 ll_md_real_close(inode, FMODE_READ);
1204
1205 if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1206 OBD_FREE(lli->lli_symlink_name,
1207 strlen(lli->lli_symlink_name) + 1);
1208 lli->lli_symlink_name = NULL;
1209 }
1210
1211 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1212 LASSERT(lli->lli_posix_acl == NULL);
1213 if (lli->lli_remote_perms) {
1214 free_rmtperm_hash(lli->lli_remote_perms);
1215 lli->lli_remote_perms = NULL;
1216 }
1217 }
1218#ifdef CONFIG_FS_POSIX_ACL
1219 else if (lli->lli_posix_acl) {
1220 LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1221 LASSERT(lli->lli_remote_perms == NULL);
1222 posix_acl_release(lli->lli_posix_acl);
1223 lli->lli_posix_acl = NULL;
1224 }
1225#endif
1226 lli->lli_inode_magic = LLI_INODE_DEAD;
1227
1228 ll_clear_inode_capas(inode);
1229 if (!S_ISDIR(inode->i_mode))
1230 LASSERT(list_empty(&lli->lli_agl_list));
1231
1232 /*
1233 * XXX This has to be done before lsm is freed below, because
1234 * cl_object still uses inode lsm.
1235 */
1236 cl_inode_fini(inode);
1237 lli->lli_has_smd = false;
1238
1239 EXIT;
1240}
1241
1242int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1243 struct md_open_data **mod)
1244{
1245 struct lustre_md md;
1246 struct inode *inode = dentry->d_inode;
1247 struct ll_sb_info *sbi = ll_i2sbi(inode);
1248 struct ptlrpc_request *request = NULL;
1249 int rc, ia_valid;
1250 ENTRY;
1251
1252 op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1253 LUSTRE_OPC_ANY, NULL);
1254 if (IS_ERR(op_data))
1255 RETURN(PTR_ERR(op_data));
1256
1257 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
1258 &request, mod);
1259 if (rc) {
1260 ptlrpc_req_finished(request);
1261 if (rc == -ENOENT) {
1262 clear_nlink(inode);
1263 /* Unlinked special device node? Or just a race?
1264 * Pretend we did everything. */
1265 if (!S_ISREG(inode->i_mode) &&
1266 !S_ISDIR(inode->i_mode)) {
1267 ia_valid = op_data->op_attr.ia_valid;
1268 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1269 rc = simple_setattr(dentry, &op_data->op_attr);
1270 op_data->op_attr.ia_valid = ia_valid;
1271 }
1272 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1273 CERROR("md_setattr fails: rc = %d\n", rc);
1274 }
1275 RETURN(rc);
1276 }
1277
1278 rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1279 sbi->ll_md_exp, &md);
1280 if (rc) {
1281 ptlrpc_req_finished(request);
1282 RETURN(rc);
1283 }
1284
1285 ia_valid = op_data->op_attr.ia_valid;
1286 /* inode size will be set in ll_setattr_ost; can't do it now since the
1287 * dirty cache is not cleared yet. */
1288 op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1289 rc = simple_setattr(dentry, &op_data->op_attr);
1290 op_data->op_attr.ia_valid = ia_valid;
1291
1292 /* Extract epoch data if obtained. */
1293 op_data->op_handle = md.body->handle;
1294 op_data->op_ioepoch = md.body->ioepoch;
1295
1296 ll_update_inode(inode, &md);
1297 ptlrpc_req_finished(request);
1298
1299 RETURN(rc);
1300}
1301
1302/* Close IO epoch and send Size-on-MDS attribute update. */
1303static int ll_setattr_done_writing(struct inode *inode,
1304 struct md_op_data *op_data,
1305 struct md_open_data *mod)
1306{
1307 struct ll_inode_info *lli = ll_i2info(inode);
1308 int rc = 0;
1309 ENTRY;
1310
1311 LASSERT(op_data != NULL);
1312 if (!S_ISREG(inode->i_mode))
1313 RETURN(0);
1314
1315 CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
1316 op_data->op_ioepoch, PFID(&lli->lli_fid));
1317
1318 op_data->op_flags = MF_EPOCH_CLOSE;
1319 ll_done_writing_attr(inode, op_data);
1320 ll_pack_inode2opdata(inode, op_data, NULL);
1321
1322 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1323 if (rc == -EAGAIN) {
1324 /* MDS has instructed us to obtain Size-on-MDS attribute
1325 * from OSTs and send the setattr back to the MDS. */
1326 rc = ll_som_update(inode, op_data);
1327 } else if (rc) {
1328 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1329 inode->i_ino, rc);
1330 }
1331 RETURN(rc);
1332}
1333
1334static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
1335{
1336 struct obd_capa *capa;
1337 int rc;
1338
1339 if (attr->ia_valid & ATTR_SIZE)
1340 capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
1341 else
1342 capa = ll_mdscapa_get(inode);
1343
1344 rc = cl_setattr_ost(inode, attr, capa);
1345
1346 if (attr->ia_valid & ATTR_SIZE)
1347 ll_truncate_free_capa(capa);
1348 else
1349 capa_put(capa);
1350
1351 return rc;
1352}
1353
1354
1355/* If this inode has objects allocated to it (lsm != NULL), then the OST
1356 * object(s) determine the file size and mtime. Otherwise, the MDS will
1357 * keep these values until such a time that objects are allocated for it.
1358 * We do the MDS operations first, as it is checking permissions for us.
1359 * We don't do the MDS RPC if there is nothing that we want to store there,
1360 * otherwise there is no harm in updating mtime/atime on the MDS if we are
1361 * going to do an RPC anyways.
1362 *
1363 * If we are doing a truncate, we will send the mtime and ctime updates
1364 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1365 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1366 * at the same time.
1367 */
1368int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
1369{
1370 struct inode *inode = dentry->d_inode;
1371 struct ll_inode_info *lli = ll_i2info(inode);
1372 struct md_op_data *op_data = NULL;
1373 struct md_open_data *mod = NULL;
1374 int rc = 0, rc1 = 0;
1375 ENTRY;
1376
1377 CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
1378 "valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
1379 PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
1380 attr->ia_valid);
1381
1382 if (attr->ia_valid & ATTR_SIZE) {
1383 /* Check new size against VFS/VM file size limit and rlimit */
1384 rc = inode_newsize_ok(inode, attr->ia_size);
1385 if (rc)
1386 RETURN(rc);
1387
1388 /* The maximum Lustre file size is variable, based on the
1389 * OST maximum object size and number of stripes. This
1390 * needs another check in addition to the VFS check above. */
1391 if (attr->ia_size > ll_file_maxbytes(inode)) {
1392 CDEBUG(D_INODE,"file "DFID" too large %llu > "LPU64"\n",
1393 PFID(&lli->lli_fid), attr->ia_size,
1394 ll_file_maxbytes(inode));
1395 RETURN(-EFBIG);
1396 }
1397
1398 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1399 }
1400
1401 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1402 if (attr->ia_valid & TIMES_SET_FLAGS) {
1403 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1404 !cfs_capable(CFS_CAP_FOWNER))
1405 RETURN(-EPERM);
1406 }
1407
1408 /* We mark all of the fields "set" so MDS/OST does not re-set them */
1409 if (attr->ia_valid & ATTR_CTIME) {
1410 attr->ia_ctime = CFS_CURRENT_TIME;
1411 attr->ia_valid |= ATTR_CTIME_SET;
1412 }
1413 if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1414 (attr->ia_valid & ATTR_ATIME)) {
1415 attr->ia_atime = CFS_CURRENT_TIME;
1416 attr->ia_valid |= ATTR_ATIME_SET;
1417 }
1418 if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1419 (attr->ia_valid & ATTR_MTIME)) {
1420 attr->ia_mtime = CFS_CURRENT_TIME;
1421 attr->ia_valid |= ATTR_MTIME_SET;
1422 }
1423
1424 if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1425 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1426 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1427 cfs_time_current_sec());
1428
1429 /* If we are changing file size, file content is modified, flag it. */
1430 if (attr->ia_valid & ATTR_SIZE) {
1431 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1432 spin_lock(&lli->lli_lock);
1433 lli->lli_flags |= LLIF_DATA_MODIFIED;
1434 spin_unlock(&lli->lli_lock);
1435 }
1436
1437 /* We always do an MDS RPC, even if we're only changing the size;
1438 * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1439
1440 OBD_ALLOC_PTR(op_data);
1441 if (op_data == NULL)
1442 RETURN(-ENOMEM);
1443
1444 if (!S_ISDIR(inode->i_mode)) {
1445 if (attr->ia_valid & ATTR_SIZE)
1446 inode_dio_write_done(inode);
1447 mutex_unlock(&inode->i_mutex);
1448 down_write(&lli->lli_trunc_sem);
1449 }
1450
1451 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1452
1453 /* Open epoch for truncate. */
1454 if (exp_connect_som(ll_i2mdexp(inode)) &&
1455 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1456 op_data->op_flags = MF_EPOCH_OPEN;
1457
1458 rc = ll_md_setattr(dentry, op_data, &mod);
1459 if (rc)
1460 GOTO(out, rc);
1461
1462 /* RPC to MDT is sent, cancel data modification flag */
1463 if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
1464 spin_lock(&lli->lli_lock);
1465 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1466 spin_unlock(&lli->lli_lock);
1467 }
1468
1469 ll_ioepoch_open(lli, op_data->op_ioepoch);
1470 if (!S_ISREG(inode->i_mode))
1471 GOTO(out, rc = 0);
1472
1473 if (attr->ia_valid & (ATTR_SIZE |
1474 ATTR_ATIME | ATTR_ATIME_SET |
1475 ATTR_MTIME | ATTR_MTIME_SET))
1476 /* For truncate and utimes sending attributes to OSTs, setting
1477 * mtime/atime to the past will be performed under PW [0:EOF]
1478 * extent lock (new_size:EOF for truncate). It may seem
1479 * excessive to send mtime/atime updates to OSTs when not
1480 * setting times to past, but it is necessary due to possible
1481 * time de-synchronization between MDT inode and OST objects */
1482 rc = ll_setattr_ost(inode, attr);
1483 EXIT;
1484out:
1485 if (op_data) {
1486 if (op_data->op_ioepoch) {
1487 rc1 = ll_setattr_done_writing(inode, op_data, mod);
1488 if (!rc)
1489 rc = rc1;
1490 }
1491 ll_finish_md_op_data(op_data);
1492 }
1493 if (!S_ISDIR(inode->i_mode)) {
1494 up_write(&lli->lli_trunc_sem);
1495 mutex_lock(&inode->i_mutex);
1496 if (attr->ia_valid & ATTR_SIZE)
1497 inode_dio_wait(inode);
1498 }
1499
1500 ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
1501 LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
1502
1503 return rc;
1504}
1505
1506int ll_setattr(struct dentry *de, struct iattr *attr)
1507{
1508 int mode = de->d_inode->i_mode;
1509
1510 if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1511 (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1512 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1513
1514 if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
1515 (ATTR_SIZE|ATTR_MODE)) &&
1516 (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1517 (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1518 !(attr->ia_mode & S_ISGID))))
1519 attr->ia_valid |= ATTR_FORCE;
1520
1521 if ((mode & S_ISUID) &&
1522 !(attr->ia_mode & S_ISUID) &&
1523 !(attr->ia_valid & ATTR_KILL_SUID))
1524 attr->ia_valid |= ATTR_KILL_SUID;
1525
1526 if (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1527 !(attr->ia_mode & S_ISGID) &&
1528 !(attr->ia_valid & ATTR_KILL_SGID))
1529 attr->ia_valid |= ATTR_KILL_SGID;
1530
1531 return ll_setattr_raw(de, attr);
1532}
1533
1534int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1535 __u64 max_age, __u32 flags)
1536{
1537 struct ll_sb_info *sbi = ll_s2sbi(sb);
1538 struct obd_statfs obd_osfs;
1539 int rc;
1540 ENTRY;
1541
1542 rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1543 if (rc) {
1544 CERROR("md_statfs fails: rc = %d\n", rc);
1545 RETURN(rc);
1546 }
1547
1548 osfs->os_type = sb->s_magic;
1549
1550 CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1551 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
1552
1553 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1554 flags |= OBD_STATFS_NODELAY;
1555
1556 rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1557 if (rc) {
1558 CERROR("obd_statfs fails: rc = %d\n", rc);
1559 RETURN(rc);
1560 }
1561
1562 CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
1563 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1564 obd_osfs.os_files);
1565
1566 osfs->os_bsize = obd_osfs.os_bsize;
1567 osfs->os_blocks = obd_osfs.os_blocks;
1568 osfs->os_bfree = obd_osfs.os_bfree;
1569 osfs->os_bavail = obd_osfs.os_bavail;
1570
1571 /* If we don't have as many objects free on the OST as inodes
1572 * on the MDS, we reduce the total number of inodes to
1573 * compensate, so that the "inodes in use" number is correct.
1574 */
1575 if (obd_osfs.os_ffree < osfs->os_ffree) {
1576 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1577 obd_osfs.os_ffree;
1578 osfs->os_ffree = obd_osfs.os_ffree;
1579 }
1580
1581 RETURN(rc);
1582}
1583int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1584{
1585 struct super_block *sb = de->d_sb;
1586 struct obd_statfs osfs;
1587 int rc;
1588
1589 CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
1590 ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1591
1592 /* Some amount of caching on the client is allowed */
1593 rc = ll_statfs_internal(sb, &osfs,
1594 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1595 0);
1596 if (rc)
1597 return rc;
1598
1599 statfs_unpack(sfs, &osfs);
1600
1601 /* We need to downshift for all 32-bit kernels, because we can't
1602 * tell if the kernel is being called via sys_statfs64() or not.
1603 * Stop before overflowing f_bsize - in which case it is better
1604 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
1605 if (sizeof(long) < 8) {
1606 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1607 sfs->f_bsize <<= 1;
1608
1609 osfs.os_blocks >>= 1;
1610 osfs.os_bfree >>= 1;
1611 osfs.os_bavail >>= 1;
1612 }
1613 }
1614
1615 sfs->f_blocks = osfs.os_blocks;
1616 sfs->f_bfree = osfs.os_bfree;
1617 sfs->f_bavail = osfs.os_bavail;
1618
1619 return 0;
1620}
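
/*
 * Hedged sketch, not part of the original file: the 32-bit downshift in
 * ll_statfs() above in isolation.  Worked example: a 32 TiB filesystem with
 * 4 KiB blocks reports 2^33 blocks, which overflows a 32-bit f_blocks; two
 * iterations raise f_bsize to 16 KiB and shrink the counts to 2^31, which
 * fits.  The helper name is made up.
 */
static void ll_statfs_downshift_sketch(struct kstatfs *sfs, __u64 *blocks,
				       __u64 *bfree, __u64 *bavail)
{
	if (sizeof(long) < 8) {
		while (*blocks > ~0UL && sfs->f_bsize < 0x40000000) {
			sfs->f_bsize <<= 1;	/* larger units ... */
			*blocks >>= 1;		/* ... fewer of them */
			*bfree >>= 1;
			*bavail >>= 1;
		}
	}
}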
1621
1622void ll_inode_size_lock(struct inode *inode)
1623{
1624 struct ll_inode_info *lli;
1625
1626 LASSERT(!S_ISDIR(inode->i_mode));
1627
1628 lli = ll_i2info(inode);
1629 LASSERT(lli->lli_size_sem_owner != current);
1630 down(&lli->lli_size_sem);
1631 LASSERT(lli->lli_size_sem_owner == NULL);
1632 lli->lli_size_sem_owner = current;
1633}
1634
1635void ll_inode_size_unlock(struct inode *inode)
1636{
1637 struct ll_inode_info *lli;
1638
1639 lli = ll_i2info(inode);
1640 LASSERT(lli->lli_size_sem_owner == current);
1641 lli->lli_size_sem_owner = NULL;
1642 up(&lli->lli_size_sem);
1643}
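
/*
 * Hedged usage sketch, not part of the original file: the intended pairing
 * of the i_size semaphore helpers above.  The function name is made up.
 */
static void ll_inode_size_update_sketch(struct inode *inode, loff_t new_size)
{
	ll_inode_size_lock(inode);	/* take lli_size_sem, record owner */
	i_size_write(inode, new_size);
	ll_inode_size_unlock(inode);	/* clear owner, release semaphore */
}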
1644
1645void ll_update_inode(struct inode *inode, struct lustre_md *md)
1646{
1647 struct ll_inode_info *lli = ll_i2info(inode);
1648 struct mdt_body *body = md->body;
1649 struct lov_stripe_md *lsm = md->lsm;
1650 struct ll_sb_info *sbi = ll_i2sbi(inode);
1651
1652 LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1653 if (lsm != NULL) {
1654 if (!lli->lli_has_smd &&
1655 !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
1656 cl_file_inode_init(inode, md);
1657
1658 lli->lli_maxbytes = lsm->lsm_maxbytes;
1659 if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
1660 lli->lli_maxbytes = MAX_LFS_FILESIZE;
1661 }
1662
1663 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1664 if (body->valid & OBD_MD_FLRMTPERM)
1665 ll_update_remote_perm(inode, md->remote_perm);
1666 }
1667#ifdef CONFIG_FS_POSIX_ACL
1668 else if (body->valid & OBD_MD_FLACL) {
1669 spin_lock(&lli->lli_lock);
1670 if (lli->lli_posix_acl)
1671 posix_acl_release(lli->lli_posix_acl);
1672 lli->lli_posix_acl = md->posix_acl;
1673 spin_unlock(&lli->lli_lock);
1674 }
1675#endif
1676 inode->i_ino = cl_fid_build_ino(&body->fid1,
1677 sbi->ll_flags & LL_SBI_32BIT_API);
1678 inode->i_generation = cl_fid_build_gen(&body->fid1);
1679
1680 if (body->valid & OBD_MD_FLATIME) {
1681 if (body->atime > LTIME_S(inode->i_atime))
1682 LTIME_S(inode->i_atime) = body->atime;
1683 lli->lli_lvb.lvb_atime = body->atime;
1684 }
1685 if (body->valid & OBD_MD_FLMTIME) {
1686 if (body->mtime > LTIME_S(inode->i_mtime)) {
1687 CDEBUG(D_INODE, "setting ino %lu mtime from %lu "
1688 "to "LPU64"\n", inode->i_ino,
1689 LTIME_S(inode->i_mtime), body->mtime);
1690 LTIME_S(inode->i_mtime) = body->mtime;
1691 }
1692 lli->lli_lvb.lvb_mtime = body->mtime;
1693 }
1694 if (body->valid & OBD_MD_FLCTIME) {
1695 if (body->ctime > LTIME_S(inode->i_ctime))
1696 LTIME_S(inode->i_ctime) = body->ctime;
1697 lli->lli_lvb.lvb_ctime = body->ctime;
1698 }
1699 if (body->valid & OBD_MD_FLMODE)
1700 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1701 if (body->valid & OBD_MD_FLTYPE)
1702 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1703 LASSERT(inode->i_mode != 0);
1704 if (S_ISREG(inode->i_mode)) {
1705 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1706 } else {
1707 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1708 }
1709 if (body->valid & OBD_MD_FLUID)
1710 inode->i_uid = make_kuid(&init_user_ns, body->uid);
1711 if (body->valid & OBD_MD_FLGID)
1712 inode->i_gid = make_kgid(&init_user_ns, body->gid);
1713 if (body->valid & OBD_MD_FLFLAGS)
1714 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1715 if (body->valid & OBD_MD_FLNLINK)
1716 set_nlink(inode, body->nlink);
1717 if (body->valid & OBD_MD_FLRDEV)
1718 inode->i_rdev = old_decode_dev(body->rdev);
1719
1720 if (body->valid & OBD_MD_FLID) {
1721 /* FID shouldn't be changed! */
1722 if (fid_is_sane(&lli->lli_fid)) {
1723 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1724 "Trying to change FID "DFID
1725 " to the "DFID", inode %lu/%u(%p)\n",
1726 PFID(&lli->lli_fid), PFID(&body->fid1),
1727 inode->i_ino, inode->i_generation, inode);
1728 } else
1729 lli->lli_fid = body->fid1;
1730 }
1731
1732 LASSERT(fid_seq(&lli->lli_fid) != 0);
1733
1734 if (body->valid & OBD_MD_FLSIZE) {
1735 if (exp_connect_som(ll_i2mdexp(inode)) &&
1736 S_ISREG(inode->i_mode)) {
1737 struct lustre_handle lockh;
1738 ldlm_mode_t mode;
1739
1740 /* As it is possible a blocking ast has been processed
1741 * by this time, we need to check there is an UPDATE
1742 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
1743 * it. */
1744 mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
1745 &lockh, LDLM_FL_CBPENDING);
1746 if (mode) {
1747 if (lli->lli_flags & (LLIF_DONE_WRITING |
1748 LLIF_EPOCH_PENDING |
1749 LLIF_SOM_DIRTY)) {
1750 CERROR("ino %lu flags %u still has "
1751 "size authority! do not trust "
1752 "the size got from MDS\n",
1753 inode->i_ino, lli->lli_flags);
1754 } else {
1755 /* Use old size assignment to avoid
1756 * deadlock bz14138 & bz14326 */
1757 i_size_write(inode, body->size);
1758 lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
1759 }
1760 ldlm_lock_decref(&lockh, mode);
1761 }
1762 } else {
1763 /* Use old size assignment to avoid
1764 * deadlock bz14138 & bz14326 */
1765 i_size_write(inode, body->size);
1766
1767 CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
1768 inode->i_ino, (unsigned long long)body->size);
1769 }
1770
1771 if (body->valid & OBD_MD_FLBLOCKS)
1772 inode->i_blocks = body->blocks;
1773 }
1774
1775 if (body->valid & OBD_MD_FLMDSCAPA) {
1776 LASSERT(md->mds_capa);
1777 ll_add_capa(inode, md->mds_capa);
1778 }
1779 if (body->valid & OBD_MD_FLOSSCAPA) {
1780 LASSERT(md->oss_capa);
1781 ll_add_capa(inode, md->oss_capa);
1782 }
1783}
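
/*
 * Hedged sketch, not part of the original file: the timestamp merge rule
 * used by ll_update_inode() above - a server-supplied time only ever moves
 * the in-core timestamp forward, so an older server value never clobbers a
 * newer local one.  The helper name is made up.
 */
static void ll_merge_atime_sketch(struct inode *inode, __u64 srv_atime)
{
	if (srv_atime > LTIME_S(inode->i_atime))
		LTIME_S(inode->i_atime) = srv_atime;	/* newer wins */
	/* otherwise keep the local (newer) timestamp */
}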
1784
1785void ll_read_inode2(struct inode *inode, void *opaque)
1786{
1787 struct lustre_md *md = opaque;
1788 struct ll_inode_info *lli = ll_i2info(inode);
1789 ENTRY;
1790
1791 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1792 PFID(&lli->lli_fid), inode);
1793
1794 LASSERT(!lli->lli_has_smd);
1795
1796 /* Core attributes from the MDS first. This is a new inode, and
1797 * the VFS doesn't zero times in the core inode so we have to do
1798 * it ourselves. They will be overwritten by either MDS or OST
1799 * attributes - we just need to make sure they aren't newer. */
1800 LTIME_S(inode->i_mtime) = 0;
1801 LTIME_S(inode->i_atime) = 0;
1802 LTIME_S(inode->i_ctime) = 0;
1803 inode->i_rdev = 0;
1804 ll_update_inode(inode, md);
1805
1806 /* OIDEBUG(inode); */
1807
1808 /* initializing backing dev info. */
1809 inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
1810
1811
1812 if (S_ISREG(inode->i_mode)) {
1813 struct ll_sb_info *sbi = ll_i2sbi(inode);
1814 inode->i_op = &ll_file_inode_operations;
1815 inode->i_fop = sbi->ll_fop;
1816 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
1817 EXIT;
1818 } else if (S_ISDIR(inode->i_mode)) {
1819 inode->i_op = &ll_dir_inode_operations;
1820 inode->i_fop = &ll_dir_operations;
1821 EXIT;
1822 } else if (S_ISLNK(inode->i_mode)) {
1823 inode->i_op = &ll_fast_symlink_inode_operations;
1824 EXIT;
1825 } else {
1826 inode->i_op = &ll_special_inode_operations;
1827
1828 init_special_inode(inode, inode->i_mode,
1829 inode->i_rdev);
1830
1831 EXIT;
1832 }
1833}
1834
1835void ll_delete_inode(struct inode *inode)
1836{
1837 struct cl_inode_info *lli = cl_i2info(inode);
1838 ENTRY;
1839
1840 if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
1841 /* discard all dirty pages before truncating them, required by
1842 * osc_extent implementation at LU-1030. */
1843 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
1844 CL_FSYNC_DISCARD, 1);
1845
1846 truncate_inode_pages(&inode->i_data, 0);
1847
1848 /* Workaround for LU-118 */
1849 if (inode->i_data.nrpages) {
1850 TREE_READ_LOCK_IRQ(&inode->i_data);
1851 TREE_READ_UNLOCK_IRQ(&inode->i_data);
1852 LASSERTF(inode->i_data.nrpages == 0,
1853 "inode=%lu/%u(%p) nrpages=%lu, see "
1854 "http://jira.whamcloud.com/browse/LU-118\n",
1855 inode->i_ino, inode->i_generation, inode,
1856 inode->i_data.nrpages);
1857 }
1858 /* Workaround end */
1859
1860 ll_clear_inode(inode);
1861 clear_inode(inode);
1862
1863 EXIT;
1864}
1865
1866int ll_iocontrol(struct inode *inode, struct file *file,
1867 unsigned int cmd, unsigned long arg)
1868{
1869 struct ll_sb_info *sbi = ll_i2sbi(inode);
1870 struct ptlrpc_request *req = NULL;
1871 int rc, flags = 0;
1872 ENTRY;
1873
1874 switch (cmd) {
1875 case FSFILT_IOC_GETFLAGS: {
1876 struct mdt_body *body;
1877 struct md_op_data *op_data;
1878
1879 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1880 0, 0, LUSTRE_OPC_ANY,
1881 NULL);
1882 if (IS_ERR(op_data))
1883 RETURN(PTR_ERR(op_data));
1884
1885 op_data->op_valid = OBD_MD_FLFLAGS;
1886 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1887 ll_finish_md_op_data(op_data);
1888 if (rc) {
1889 CERROR("failure %d inode %lu\n", rc, inode->i_ino);
1890 RETURN(-abs(rc));
1891 }
1892
1893 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1894
1895 flags = body->flags;
1896
1897 ptlrpc_req_finished(req);
1898
1899 RETURN(put_user(flags, (int *)arg));
1900 }
1901 case FSFILT_IOC_SETFLAGS: {
1902 struct lov_stripe_md *lsm;
1903 struct obd_info oinfo = { { { 0 } } };
1904 struct md_op_data *op_data;
1905
1906 if (get_user(flags, (int *)arg))
1907 RETURN(-EFAULT);
1908
1909 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1910 LUSTRE_OPC_ANY, NULL);
1911 if (IS_ERR(op_data))
1912 RETURN(PTR_ERR(op_data));
1913
1914 ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
1915 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
1916 rc = md_setattr(sbi->ll_md_exp, op_data,
1917 NULL, 0, NULL, 0, &req, NULL);
1918 ll_finish_md_op_data(op_data);
1919 ptlrpc_req_finished(req);
1920 if (rc)
1921 RETURN(rc);
1922
1923 inode->i_flags = ll_ext_to_inode_flags(flags);
1924
1925 lsm = ccc_inode_lsm_get(inode);
1926 if (!lsm_has_objects(lsm)) {
1927 ccc_inode_lsm_put(inode, lsm);
1928 RETURN(0);
1929 }
1930
1931 OBDO_ALLOC(oinfo.oi_oa);
1932 if (!oinfo.oi_oa) {
1933 ccc_inode_lsm_put(inode, lsm);
1934 RETURN(-ENOMEM);
1935 }
1936 oinfo.oi_md = lsm;
1937 oinfo.oi_oa->o_oi = lsm->lsm_oi;
1938 oinfo.oi_oa->o_flags = flags;
1939 oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
1940 OBD_MD_FLGROUP;
1941 oinfo.oi_capa = ll_mdscapa_get(inode);
1942 obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
1943 rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
1944 capa_put(oinfo.oi_capa);
1945 OBDO_FREE(oinfo.oi_oa);
1946 ccc_inode_lsm_put(inode, lsm);
1947
1948 if (rc && rc != -EPERM && rc != -EACCES)
1949 CERROR("osc_setattr_async fails: rc = %d\n", rc);
1950
1951 RETURN(rc);
1952 }
1953 default:
1954 RETURN(-ENOSYS);
1955 }
1956
1957 RETURN(0);
1958}
1959
1960int ll_flush_ctx(struct inode *inode)
1961{
1962 struct ll_sb_info *sbi = ll_i2sbi(inode);
1963
1964 CDEBUG(D_SEC, "flush context for user %d\n",
1965 from_kuid(&init_user_ns, current_uid()));
1966
1967 obd_set_info_async(NULL, sbi->ll_md_exp,
1968 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1969 0, NULL, NULL);
1970 obd_set_info_async(NULL, sbi->ll_dt_exp,
1971 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1972 0, NULL, NULL);
1973 return 0;
1974}
1975
1976/* umount -f client means force down, don't save state */
1977void ll_umount_begin(struct super_block *sb)
1978{
1979 struct ll_sb_info *sbi = ll_s2sbi(sb);
1980 struct obd_device *obd;
1981 struct obd_ioctl_data *ioc_data;
1982 ENTRY;
1983
1984
1985 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
1986 sb->s_count, atomic_read(&sb->s_active));
1987
1988 obd = class_exp2obd(sbi->ll_md_exp);
1989 if (obd == NULL) {
1990 CERROR("Invalid MDC connection handle "LPX64"\n",
1991 sbi->ll_md_exp->exp_handle.h_cookie);
1992 EXIT;
1993 return;
1994 }
1995 obd->obd_force = 1;
1996
1997 obd = class_exp2obd(sbi->ll_dt_exp);
1998 if (obd == NULL) {
1999 CERROR("Invalid LOV connection handle "LPX64"\n",
2000 sbi->ll_dt_exp->exp_handle.h_cookie);
2001 EXIT;
2002 return;
2003 }
2004 obd->obd_force = 1;
2005
2006 OBD_ALLOC_PTR(ioc_data);
2007 if (ioc_data) {
2008 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
2009 sizeof(*ioc_data), ioc_data, NULL);
2010
2011 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
2012 sizeof(*ioc_data), ioc_data, NULL);
2013
2014 OBD_FREE_PTR(ioc_data);
2015 }
2016
2017 /* Ideally we would wait until there are no requests outstanding
2018 * and then continue. For now we just invalidate the requests,
2019 * schedule(), sleep one second if needed, and hope.
2020 */
2021 schedule();
2022
2023 EXIT;
2024}
2025
2026int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2027{
2028 struct ll_sb_info *sbi = ll_s2sbi(sb);
2029 char *profilenm = get_profile_name(sb);
2030 int err;
2031 __u32 read_only;
2032
2033 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2034 read_only = *flags & MS_RDONLY;
2035 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2036 sizeof(KEY_READ_ONLY),
2037 KEY_READ_ONLY, sizeof(read_only),
2038 &read_only, NULL);
2039 if (err) {
2040 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2041 profilenm, read_only ?
2042 "read-only" : "read-write", err);
2043 return err;
2044 }
2045
2046 if (read_only)
2047 sb->s_flags |= MS_RDONLY;
2048 else
2049 sb->s_flags &= ~MS_RDONLY;
2050
2051 if (sbi->ll_flags & LL_SBI_VERBOSE)
2052 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2053 read_only ? "read-only" : "read-write");
2054 }
2055 return 0;
2056}
2057
2058int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2059 struct super_block *sb, struct lookup_intent *it)
2060{
2061 struct ll_sb_info *sbi = NULL;
2062 struct lustre_md md;
2063 int rc;
2064 ENTRY;
2065
2066 LASSERT(*inode || sb);
2067 sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2068 rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2069 sbi->ll_md_exp, &md);
2070 if (rc)
2071 RETURN(rc);
2072
2073 if (*inode) {
2074 ll_update_inode(*inode, &md);
2075 } else {
2076 LASSERT(sb != NULL);
2077
2078 /*
2079 * At this point the server returns the same FID that the client
2080 * generated at create time, so using ->fid1 is safe here.
2081 */
2082 LASSERT(fid_is_sane(&md.body->fid1));
2083
2084 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
2085 sbi->ll_flags & LL_SBI_32BIT_API),
2086 &md);
2087 if (*inode == NULL || IS_ERR(*inode)) {
2088#ifdef CONFIG_FS_POSIX_ACL
2089 if (md.posix_acl) {
2090 posix_acl_release(md.posix_acl);
2091 md.posix_acl = NULL;
2092 }
2093#endif
2094 rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
2095 *inode = NULL;
2096 CERROR("new_inode - fatal: rc %d\n", rc);
2097 GOTO(out, rc);
2098 }
2099 }
2100
2101 /* Handle a piggybacked layout lock.
2102 * A layout lock can be piggybacked on getattr and open requests.
2103 * The lsm may be applied to the inode only if it comes with a layout
2104 * lock; otherwise a correct layout could be overwritten, for example:
2105 * 1. proc1: the MDT returns an lsm but does not grant the layout lock
2106 * 2. the layout is changed by another client
2107 * 3. proc2: refreshes the layout and is granted the layout lock
2108 * 4. proc1: would then apply a stale layout */
2109 if (it != NULL && it->d.lustre.it_lock_mode != 0) {
2110 struct lustre_handle lockh;
2111 struct ldlm_lock *lock;
2112
2113 lockh.cookie = it->d.lustre.it_lock_handle;
2114 lock = ldlm_handle2lock(&lockh);
2115 LASSERT(lock != NULL);
2116 if (ldlm_has_layout(lock)) {
2117 struct cl_object_conf conf;
2118
2119 memset(&conf, 0, sizeof(conf));
2120 conf.coc_opc = OBJECT_CONF_SET;
2121 conf.coc_inode = *inode;
2122 conf.coc_lock = lock;
2123 conf.u.coc_md = &md;
2124 (void)ll_layout_conf(*inode, &conf);
2125 }
2126 LDLM_LOCK_PUT(lock);
2127 }
2128
2129out:
2130 if (md.lsm != NULL)
2131 obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
2132 md_free_lustre_md(sbi->ll_md_exp, &md);
2133 RETURN(rc);
2134}
2135
2136int ll_obd_statfs(struct inode *inode, void *arg)
2137{
2138 struct ll_sb_info *sbi = NULL;
2139 struct obd_export *exp;
2140 char *buf = NULL;
2141 struct obd_ioctl_data *data = NULL;
2142 __u32 type;
2143 __u32 flags;
2144 int len = 0, rc;
2145
2146 if (!inode || !(sbi = ll_i2sbi(inode)))
2147 GOTO(out_statfs, rc = -EINVAL);
2148
2149 rc = obd_ioctl_getdata(&buf, &len, arg);
2150 if (rc)
2151 GOTO(out_statfs, rc);
2152
2153 data = (void *)buf;
2154 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
2155 !data->ioc_pbuf1 || !data->ioc_pbuf2)
2156 GOTO(out_statfs, rc = -EINVAL);
2157
2158 if (data->ioc_inllen1 != sizeof(__u32) ||
2159 data->ioc_inllen2 != sizeof(__u32) ||
2160 data->ioc_plen1 != sizeof(struct obd_statfs) ||
2161 data->ioc_plen2 != sizeof(struct obd_uuid))
2162 GOTO(out_statfs, rc = -EINVAL);
2163
2164 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
2165 if (type & LL_STATFS_LMV)
2166 exp = sbi->ll_md_exp;
2167 else if (type & LL_STATFS_LOV)
2168 exp = sbi->ll_dt_exp;
2169 else
2170 GOTO(out_statfs, rc = -ENODEV);
2171
2172 flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
2173 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
2174 if (rc)
2175 GOTO(out_statfs, rc);
2176out_statfs:
2177 if (buf)
2178 obd_ioctl_freedata(buf, len);
2179 return rc;
2180}
2181
2182int ll_process_config(struct lustre_cfg *lcfg)
2183{
2184 char *ptr;
2185 void *sb;
2186 struct lprocfs_static_vars lvars;
2187 unsigned long x;
2188 int rc = 0;
2189
2190 lprocfs_llite_init_vars(&lvars);
2191
2192 /* The instance name contains the sb: lustre-client-aacfe000 */
2193 ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2194 if (!ptr || !*(++ptr))
2195 return -EINVAL;
2196 if (sscanf(ptr, "%lx", &x) != 1)
2197 return -EINVAL;
2198 sb = (void *)x;
2199 /* This better be a real Lustre superblock! */
2200 LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2201
2202 /* Note we have not called client_common_fill_super yet, so
2203 * proc fns must be able to handle that! */
2204 rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2205 lcfg, sb);
2206 if (rc > 0)
2207 rc = 0;
2208 return rc;
2209}
2210
2211 /* This function prepares the md_op_data hint for passing down to the MD stack. */
2212 struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
2213 struct inode *i1, struct inode *i2,
2214 const char *name, int namelen,
2215 int mode, __u32 opc, void *data)
2216{
2217 LASSERT(i1 != NULL);
2218
2219 if (namelen > ll_i2sbi(i1)->ll_namelen)
2220 return ERR_PTR(-ENAMETOOLONG);
2221
2222 if (op_data == NULL)
2223 OBD_ALLOC_PTR(op_data);
2224
2225 if (op_data == NULL)
2226 return ERR_PTR(-ENOMEM);
2227
2228 ll_i2gids(op_data->op_suppgids, i1, i2);
2229 op_data->op_fid1 = *ll_inode2fid(i1);
2230 op_data->op_capa1 = ll_mdscapa_get(i1);
2231
2232 if (i2) {
2233 op_data->op_fid2 = *ll_inode2fid(i2);
2234 op_data->op_capa2 = ll_mdscapa_get(i2);
2235 } else {
2236 fid_zero(&op_data->op_fid2);
2237 op_data->op_capa2 = NULL;
2238 }
2239
2240 op_data->op_name = name;
2241 op_data->op_namelen = namelen;
2242 op_data->op_mode = mode;
2243 op_data->op_mod_time = cfs_time_current_sec();
2244 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2245 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2246 op_data->op_cap = cfs_curproc_cap_pack();
2247 op_data->op_bias = 0;
2248 op_data->op_cli_flags = 0;
2249 if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
2250 filename_is_volatile(name, namelen, NULL))
2251 op_data->op_bias |= MDS_CREATE_VOLATILE;
2252 op_data->op_opc = opc;
2253 op_data->op_mds = 0;
2254 op_data->op_data = data;
2255
2256 /* If the file is being opened after mknod() (normally due to NFS),
2257 * try to use the default stripe data from the parent directory for
2258 * allocating OST objects, and pass the parent FID to the MDS. */
2259 if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
2260 !ll_i2info(i2)->lli_has_smd) {
2261 struct ll_inode_info *lli = ll_i2info(i2);
2262
2263 spin_lock(&lli->lli_lock);
2264 if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
2265 op_data->op_fid1 = lli->lli_pfid;
2266 spin_unlock(&lli->lli_lock);
2267 /* We ignore the parent's capability temporarily. */
2268 }
2269
2270 /* When called by ll_setattr_raw, file is i1. */
2271 if (LLIF_DATA_MODIFIED & ll_i2info(i1)->lli_flags)
2272 op_data->op_bias |= MDS_DATA_MODIFIED;
2273
2274 return op_data;
2275}
2276
2277void ll_finish_md_op_data(struct md_op_data *op_data)
2278{
2279 capa_put(op_data->op_capa1);
2280 capa_put(op_data->op_capa2);
2281 OBD_FREE_PTR(op_data);
2282}
2283
2284int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2285{
2286 struct ll_sb_info *sbi;
2287
2288 LASSERT((seq != NULL) && (dentry != NULL));
2289 sbi = ll_s2sbi(dentry->d_sb);
2290
2291 if (sbi->ll_flags & LL_SBI_NOLCK)
2292 seq_puts(seq, ",nolock");
2293
2294 if (sbi->ll_flags & LL_SBI_FLOCK)
2295 seq_puts(seq, ",flock");
2296
2297 if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2298 seq_puts(seq, ",localflock");
2299
2300 if (sbi->ll_flags & LL_SBI_USER_XATTR)
2301 seq_puts(seq, ",user_xattr");
2302
2303 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2304 seq_puts(seq, ",lazystatfs");
2305
2306 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2307 seq_puts(seq, ",user_fid2path");
2308
2309 RETURN(0);
2310}
2311
2312/**
2313 * Get obd name by cmd, and copy out to user space
2314 */
2315int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2316{
2317 struct ll_sb_info *sbi = ll_i2sbi(inode);
2318 struct obd_device *obd;
2319 ENTRY;
2320
2321 if (cmd == OBD_IOC_GETDTNAME)
2322 obd = class_exp2obd(sbi->ll_dt_exp);
2323 else if (cmd == OBD_IOC_GETMDNAME)
2324 obd = class_exp2obd(sbi->ll_md_exp);
2325 else
2326 RETURN(-EINVAL);
2327
2328 if (!obd)
2329 RETURN(-ENOENT);
2330
2331 if (copy_to_user((void *)arg, obd->obd_name,
2332 strlen(obd->obd_name) + 1))
2333 RETURN(-EFAULT);
2334
2335 RETURN(0);
2336}
2337
2338/**
2339 * Get lustre file system name by \a sbi. If \a buf is provided(non-NULL), the
2340 * fsname will be returned in this buffer; otherwise, a static buffer will be
2341 * used to store the fsname and returned to caller.
2342 */
2343char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2344{
2345 static char fsname_static[MTI_NAME_MAXLEN];
2346 struct lustre_sb_info *lsi = s2lsi(sb);
2347 char *ptr;
2348 int len;
2349
2350 if (buf == NULL) {
2351 /* The caller wants to use the static buffer and does
2352 * not care about races; this is usually only done on
2353 * error-reporting paths. */
2354 buf = fsname_static;
2355 buflen = sizeof(fsname_static);
2356 }
2357
2358 len = strlen(lsi->lsi_lmd->lmd_profile);
2359 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2360 if (ptr && (strcmp(ptr, "-client") == 0))
2361 len -= 7; /* strlen("-client") */
2362
2363 if (unlikely(len >= buflen))
2364 len = buflen - 1;
2365 strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2366 buf[len] = '\0';
2367
2368 return buf;
2369}
2370
2371 static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
2372{
2373 char *path = NULL;
2374
2375 struct path p;
2376
2377 p.dentry = dentry;
2378 p.mnt = current->fs->root.mnt;
2379 path_get(&p);
2380 path = d_path(&p, buf, bufsize);
2381 path_put(&p);
2382
2383 return path;
2384}
2385
2386void ll_dirty_page_discard_warn(struct page *page, int ioret)
2387{
2388 char *buf, *path = NULL;
2389 struct dentry *dentry = NULL;
2390 struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
2391
2392 /* This can be called inside a spinlock, so use GFP_ATOMIC. */
2393 buf = (char *)__get_free_page(GFP_ATOMIC);
2394 if (buf != NULL) {
2395 dentry = d_find_alias(page->mapping->host);
2396 if (dentry != NULL)
2397 path = ll_d_path(dentry, buf, PAGE_SIZE);
2398 }
2399
2400 CWARN("%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
2401 "(rc %d)\n", ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
2402 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
2403 PFID(&obj->cob_header.coh_lu.loh_fid),
2404 (path && !IS_ERR(path)) ? path : "", ioret);
2405
2406 if (dentry != NULL)
2407 dput(dentry);
2408
2409 if (buf != NULL)
2410 free_page((unsigned long)buf);
2411}