1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/llite/llite_lib.c
37 *
38 * Lustre Light Super operations
39 */
40
41#define DEBUG_SUBSYSTEM S_LLITE
42
43#include <linux/module.h>
44#include <linux/statfs.h>
45#include <linux/types.h>
46#include <linux/mm.h>
47
48#include "../include/lustre_lite.h"
49#include "../include/lustre_ha.h"
50#include "../include/lustre_dlm.h"
51#include "../include/lprocfs_status.h"
52#include "../include/lustre_disk.h"
53#include "../include/lustre_param.h"
54#include "../include/lustre_log.h"
55#include "../include/cl_object.h"
56#include "../include/obd_cksum.h"
57#include "llite_internal.h"
58
59struct kmem_cache *ll_file_data_slab;
60struct proc_dir_entry *proc_lustre_fs_root;
61
62static LIST_HEAD(ll_super_blocks);
63static DEFINE_SPINLOCK(ll_sb_lock);
64
65#ifndef log2
66#define log2(n) ffz(~(n))
67#endif
68
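/* Allocate and initialize the per-mount ll_sb_info: size the client LRU
 * page cache from total RAM, set the read-ahead defaults, generate the
 * client UUID, link the sbi onto ll_super_blocks and enable the default
 * flags (verbose, checksum, LRU resize, statahead/AGL). */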
69static struct ll_sb_info *ll_init_sbi(void)
70{
71 struct ll_sb_info *sbi = NULL;
72 unsigned long pages;
73 unsigned long lru_page_max;
74 struct sysinfo si;
75 class_uuid_t uuid;
76 int i;
77
78 OBD_ALLOC(sbi, sizeof(*sbi));
79 if (!sbi)
80 return NULL;
81
82 spin_lock_init(&sbi->ll_lock);
83 mutex_init(&sbi->ll_lco.lco_lock);
84 spin_lock_init(&sbi->ll_pp_extent_lock);
85 spin_lock_init(&sbi->ll_process_lock);
86 sbi->ll_rw_stats_on = 0;
87
88 si_meminfo(&si);
89 pages = si.totalram - si.totalhigh;
90 if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) {
91 lru_page_max = pages / 2;
92 } else {
93 lru_page_max = (pages / 4) * 3;
94 }
95
96 /* initialize lru data */
97 atomic_set(&sbi->ll_cache.ccc_users, 0);
98 sbi->ll_cache.ccc_lru_max = lru_page_max;
99 atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
100 spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
101 INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
102
103 sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
104 SBI_DEFAULT_READAHEAD_MAX);
105 sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
106 sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
107 SBI_DEFAULT_READAHEAD_WHOLE_MAX;
108 INIT_LIST_HEAD(&sbi->ll_conn_chain);
109 INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
110
111 ll_generate_random_uuid(uuid);
112 class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
113 CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
114
115 spin_lock(&ll_sb_lock);
116 list_add_tail(&sbi->ll_list, &ll_super_blocks);
117 spin_unlock(&ll_sb_lock);
118
119 sbi->ll_flags |= LL_SBI_VERBOSE;
120 sbi->ll_flags |= LL_SBI_CHECKSUM;
121
122 sbi->ll_flags |= LL_SBI_LRU_RESIZE;
123
124 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
125 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
126 pp_r_hist.oh_lock);
127 spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
128 pp_w_hist.oh_lock);
129 }
130
131 /* metadata statahead is enabled by default */
132 sbi->ll_sa_max = LL_SA_RPC_DEF;
133 atomic_set(&sbi->ll_sa_total, 0);
134 atomic_set(&sbi->ll_sa_wrong, 0);
135 atomic_set(&sbi->ll_agl_total, 0);
136 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
137
138 return sbi;
139}
140
141static void ll_free_sbi(struct super_block *sb)
142{
143 struct ll_sb_info *sbi = ll_s2sbi(sb);
144
145 if (sbi != NULL) {
146 spin_lock(&ll_sb_lock);
147 list_del(&sbi->ll_list);
148 spin_unlock(&ll_sb_lock);
149 OBD_FREE(sbi, sizeof(*sbi));
150 }
151}
152
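/* Connect this super block to its metadata (md) and data (dt) obd devices,
 * negotiating the connect flags advertised below, then fetch the root FID
 * from the MDS, build the root inode/dentry and start the close thread.
 * Called from ll_fill_super() once the obds have been set up. */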
153static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
154 struct vfsmount *mnt)
155{
156 struct inode *root = NULL;
157 struct ll_sb_info *sbi = ll_s2sbi(sb);
158 struct obd_device *obd;
159 struct obd_capa *oc = NULL;
160 struct obd_statfs *osfs = NULL;
161 struct ptlrpc_request *request = NULL;
162 struct obd_connect_data *data = NULL;
163 struct obd_uuid *uuid;
164 struct md_op_data *op_data;
165 struct lustre_md lmd;
166 u64 valid;
167 int size, err, checksum;
168
169 obd = class_name2obd(md);
170 if (!obd) {
171 CERROR("MD %s: not setup or attached\n", md);
172 return -EINVAL;
173 }
174
175 OBD_ALLOC_PTR(data);
176 if (data == NULL)
177 return -ENOMEM;
178
179 OBD_ALLOC_PTR(osfs);
180 if (osfs == NULL) {
181 OBD_FREE_PTR(data);
182 return -ENOMEM;
183 }
184
185 if (proc_lustre_fs_root) {
186 err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
187 dt, md);
188 if (err < 0)
189 CERROR("could not register mount in /proc/fs/lustre\n");
190 }
191
192 /* indicate the features supported by this client */
193 data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
194 OBD_CONNECT_ATTRFID |
195 OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
196 OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
197 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
198 OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
199 OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
200 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
201 OBD_CONNECT_EINPROGRESS |
202 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
203 OBD_CONNECT_LAYOUTLOCK |
204 OBD_CONNECT_PINGLESS |
205 OBD_CONNECT_MAX_EASIZE |
206 OBD_CONNECT_FLOCK_DEAD |
207 OBD_CONNECT_DISP_STRIPE;
208
209 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
210 data->ocd_connect_flags |= OBD_CONNECT_SOM;
211
212 if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
213 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
214#ifdef CONFIG_FS_POSIX_ACL
215 data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
216#endif
217
218 if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
219 /* flag mdc connection as lightweight, only used for test
220 * purpose, use with care */
221 data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
222
223 data->ocd_ibits_known = MDS_INODELOCK_FULL;
224 data->ocd_version = LUSTRE_VERSION_CODE;
225
226 if (sb->s_flags & MS_RDONLY)
227 data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
228 if (sbi->ll_flags & LL_SBI_USER_XATTR)
229 data->ocd_connect_flags |= OBD_CONNECT_XATTR;
230
231#ifdef HAVE_MS_FLOCK_LOCK
232 /* force vfs to use lustre handler for flock() calls - bug 10743 */
233 sb->s_flags |= MS_FLOCK_LOCK;
234#endif
235#ifdef MS_HAS_NEW_AOPS
236 sb->s_flags |= MS_HAS_NEW_AOPS;
237#endif
238
239 if (sbi->ll_flags & LL_SBI_FLOCK)
240 sbi->ll_fop = &ll_file_operations_flock;
241 else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
242 sbi->ll_fop = &ll_file_operations;
243 else
244 sbi->ll_fop = &ll_file_operations_noflock;
245
246 /* real client */
247 data->ocd_connect_flags |= OBD_CONNECT_REAL;
248 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
249 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
250
251 data->ocd_brw_size = MD_MAX_BRW_SIZE;
252
253 err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
254 if (err == -EBUSY) {
255 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
256 "recovery, of which this client is not a "
257 "part. Please wait for recovery to complete,"
258 " abort, or time out.\n", md);
259 GOTO(out, err);
260 } else if (err) {
261 CERROR("cannot connect to %s: rc = %d\n", md, err);
262 GOTO(out, err);
263 }
264
265 sbi->ll_md_exp->exp_connect_data = *data;
266
267 err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
268 LUSTRE_SEQ_METADATA);
269 if (err) {
270 CERROR("%s: Can't init metadata layer FID infrastructure, "
271 "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
272 GOTO(out_md, err);
273 }
274
275 /* For mount, we only need fs info from MDT0, and also in DNE, it
276 * can make sure the client can be mounted as long as MDT0 is
277 * available */
278 err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
279 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
280 OBD_STATFS_FOR_MDT0);
281 if (err)
282 GOTO(out_md_fid, err);
283
284 /* This needs to be after statfs to ensure connect has finished.
285 * Note that "data" does NOT contain the valid connect reply.
286 * If connecting to a 1.8 server there will be no LMV device, so
287 * we can access the MDC export directly and exp_connect_flags will
288 * be non-zero, but if accessing an upgraded 2.1 server it will
289 * have the correct flags filled in.
290 * XXX: fill in the LMV exp_connect_flags from MDC(s). */
291 valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
292 if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
293 valid != CLIENT_CONNECT_MDT_REQD) {
294 char *buf;
295
296 OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE);
297 obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
298 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
299 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
300 "feature(s) needed for correct operation "
301 "of this client (%s). Please upgrade "
302 "server or downgrade client.\n",
303 sbi->ll_md_exp->exp_obd->obd_name, buf);
304 OBD_FREE(buf, PAGE_CACHE_SIZE);
305 GOTO(out_md_fid, err = -EPROTO);
306 }
307
308 size = sizeof(*data);
309 err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
310 KEY_CONN_DATA, &size, data, NULL);
311 if (err) {
312 CERROR("%s: Get connect data failed: rc = %d\n",
313 sbi->ll_md_exp->exp_obd->obd_name, err);
314 GOTO(out_md_fid, err);
315 }
316
317 LASSERT(osfs->os_bsize);
318 sb->s_blocksize = osfs->os_bsize;
319 sb->s_blocksize_bits = log2(osfs->os_bsize);
320 sb->s_magic = LL_SUPER_MAGIC;
321 sb->s_maxbytes = MAX_LFS_FILESIZE;
322 sbi->ll_namelen = osfs->os_namelen;
323 sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
324
325 if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
326 !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
327 LCONSOLE_INFO("Disabling user_xattr feature because "
328 "it is not supported on the server\n");
329 sbi->ll_flags &= ~LL_SBI_USER_XATTR;
330 }
331
332 if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
333#ifdef MS_POSIXACL
334 sb->s_flags |= MS_POSIXACL;
335#endif
336 sbi->ll_flags |= LL_SBI_ACL;
337 } else {
338 LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
339#ifdef MS_POSIXACL
340 sb->s_flags &= ~MS_POSIXACL;
341#endif
342 sbi->ll_flags &= ~LL_SBI_ACL;
343 }
344
345 if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
346 if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
347 sbi->ll_flags |= LL_SBI_RMT_CLIENT;
348 LCONSOLE_INFO("client is set as remote by default.\n");
349 }
350 } else {
351 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
352 sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
353 LCONSOLE_INFO("client claims to be remote, but server "
354 "rejected, forced to be local.\n");
355 }
356 }
357
358 if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
359 LCONSOLE_INFO("client enabled MDS capability!\n");
360 sbi->ll_flags |= LL_SBI_MDS_CAPA;
361 }
362
363 if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
364 LCONSOLE_INFO("client enabled OSS capability!\n");
365 sbi->ll_flags |= LL_SBI_OSS_CAPA;
366 }
367
368 if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
369 sbi->ll_flags |= LL_SBI_64BIT_HASH;
370
371 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
372 sbi->ll_md_brw_size = data->ocd_brw_size;
373 else
374 sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
375
376 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
377 LCONSOLE_INFO("Layout lock feature supported.\n");
378 sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
379 }
380
381 if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
382 if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
383 LCONSOLE_INFO(
384 "%s: disabling xattr cache due to unknown maximum xattr size.\n",
385 dt);
386 } else {
387 sbi->ll_flags |= LL_SBI_XATTR_CACHE;
388 sbi->ll_xattr_cache_enabled = 1;
389 }
390 }
391
392 obd = class_name2obd(dt);
393 if (!obd) {
394 CERROR("DT %s: not setup or attached\n", dt);
395 GOTO(out_md_fid, err = -ENODEV);
396 }
397
398 data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
399 OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
400 OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
401 OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
402 OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
403 OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
404 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
405 OBD_CONNECT_MAXBYTES |
406 OBD_CONNECT_EINPROGRESS |
407 OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
408 OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
409
410 if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
411 data->ocd_connect_flags |= OBD_CONNECT_SOM;
412
413 if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
414 /* OBD_CONNECT_CKSUM should always be set, even if checksums are
415 * disabled by default, because it can still be enabled on the
416 * fly via /proc. As a consequence, we still need to come to an
417 * agreement on the supported algorithms at connect time */
418 data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
419
420 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
421 data->ocd_cksum_types = OBD_CKSUM_ADLER;
422 else
423 data->ocd_cksum_types = cksum_types_supported_client();
424 }
425
426 data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
427 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
428 data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
429
430 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d "
431 "ocd_grant: %d\n", data->ocd_connect_flags,
432 data->ocd_version, data->ocd_grant);
433
434 obd->obd_upcall.onu_owner = &sbi->ll_lco;
435 obd->obd_upcall.onu_upcall = cl_ocd_update;
436
437 data->ocd_brw_size = DT_MAX_BRW_SIZE;
438
439 err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
440 NULL);
441 if (err == -EBUSY) {
442 LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
443 "recovery, of which this client is not a "
444 "part. Please wait for recovery to "
445 "complete, abort, or time out.\n", dt);
446 GOTO(out_md, err);
447 } else if (err) {
448 CERROR("%s: Cannot connect to %s: rc = %d\n",
449 sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
450 GOTO(out_md, err);
451 }
452
453 sbi->ll_dt_exp->exp_connect_data = *data;
454
455 err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
456 LUSTRE_SEQ_METADATA);
457 if (err) {
458 CERROR("%s: Can't init data layer FID infrastructure, "
459 "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
460 GOTO(out_dt, err);
461 }
462
463 mutex_lock(&sbi->ll_lco.lco_lock);
464 sbi->ll_lco.lco_flags = data->ocd_connect_flags;
465 sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
466 sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
467 mutex_unlock(&sbi->ll_lco.lco_lock);
468
469 fid_zero(&sbi->ll_root_fid);
470 err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
471 if (err) {
472 CERROR("cannot mds_connect: rc = %d\n", err);
473 GOTO(out_lock_cn_cb, err);
474 }
475 if (!fid_is_sane(&sbi->ll_root_fid)) {
476 CERROR("%s: Invalid root fid "DFID" during mount\n",
477 sbi->ll_md_exp->exp_obd->obd_name,
478 PFID(&sbi->ll_root_fid));
479 GOTO(out_lock_cn_cb, err = -EINVAL);
480 }
481 CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
482
483 sb->s_op = &lustre_super_operations;
484#if THREAD_SIZE >= 8192 /*b=17630*/
485 sb->s_export_op = &lustre_export_operations;
486#endif
487
488 /* make root inode
489 * XXX: move this to after cbd setup? */
490 valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
491 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
492 valid |= OBD_MD_FLRMTPERM;
493 else if (sbi->ll_flags & LL_SBI_ACL)
494 valid |= OBD_MD_FLACL;
495
496 OBD_ALLOC_PTR(op_data);
497 if (op_data == NULL)
498 GOTO(out_lock_cn_cb, err = -ENOMEM);
499
500 op_data->op_fid1 = sbi->ll_root_fid;
501 op_data->op_mode = 0;
502 op_data->op_capa1 = oc;
503 op_data->op_valid = valid;
504
505 err = md_getattr(sbi->ll_md_exp, op_data, &request);
506 if (oc)
507 capa_put(oc);
508 OBD_FREE_PTR(op_data);
509 if (err) {
510 CERROR("%s: md_getattr failed for root: rc = %d\n",
511 sbi->ll_md_exp->exp_obd->obd_name, err);
512 GOTO(out_lock_cn_cb, err);
513 }
514
515 err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
516 sbi->ll_md_exp, &lmd);
517 if (err) {
518 CERROR("failed to understand root inode md: rc = %d\n", err);
519 ptlrpc_req_finished(request);
520 GOTO(out_lock_cn_cb, err);
521 }
522
523 LASSERT(fid_is_sane(&sbi->ll_root_fid));
524 root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
525 sbi->ll_flags & LL_SBI_32BIT_API),
526 &lmd);
527 md_free_lustre_md(sbi->ll_md_exp, &lmd);
528 ptlrpc_req_finished(request);
529
530 if (root == NULL || IS_ERR(root)) {
531 if (lmd.lsm)
532 obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
533#ifdef CONFIG_FS_POSIX_ACL
534 if (lmd.posix_acl) {
535 posix_acl_release(lmd.posix_acl);
536 lmd.posix_acl = NULL;
537 }
538#endif
539 err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
540 root = NULL;
541 CERROR("lustre_lite: bad iget4 for root\n");
542 GOTO(out_root, err);
543 }
544
545 err = ll_close_thread_start(&sbi->ll_lcq);
546 if (err) {
547 CERROR("cannot start close thread: rc %d\n", err);
548 GOTO(out_root, err);
549 }
550
551#ifdef CONFIG_FS_POSIX_ACL
552 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
553 rct_init(&sbi->ll_rct);
554 et_init(&sbi->ll_et);
555 }
556#endif
557
558 checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
559 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
560 KEY_CHECKSUM, sizeof(checksum), &checksum,
561 NULL);
562 cl_sb_init(sb);
563
564 err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
565 KEY_CACHE_SET, sizeof(sbi->ll_cache),
566 &sbi->ll_cache, NULL);
567
568 sb->s_root = d_make_root(root);
569 if (sb->s_root == NULL) {
570 CERROR("%s: can't make root dentry\n",
571 ll_get_fsname(sb, NULL, 0));
572 GOTO(out_root, err = -ENOMEM);
573 }
574
575 sbi->ll_sdev_orig = sb->s_dev;
576
577 /* We set sb->s_dev equal on all lustre clients in order to support
578 * NFS export clustering. NFSD requires that the FSID be the same
579 * on all clients. */
580 /* s_dev is also used in lt_compare() to compare two fs, but that is
581 * only a node-local comparison. */
582 uuid = obd_get_uuid(sbi->ll_md_exp);
583 if (uuid != NULL) {
584 sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
585 get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
586 }
587
588 if (data != NULL)
589 OBD_FREE_PTR(data);
590 if (osfs != NULL)
591 OBD_FREE_PTR(osfs);
592
593 return err;
594out_root:
595 iput(root);
596out_lock_cn_cb:
597 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
598out_dt:
599 obd_disconnect(sbi->ll_dt_exp);
600 sbi->ll_dt_exp = NULL;
601 /* Make sure all OScs are gone, since cl_cache is accessing sbi. */
602 obd_zombie_barrier();
603out_md_fid:
604 obd_fid_fini(sbi->ll_md_exp->exp_obd);
605out_md:
606 obd_disconnect(sbi->ll_md_exp);
607 sbi->ll_md_exp = NULL;
608out:
609 if (data != NULL)
610 OBD_FREE_PTR(data);
611 if (osfs != NULL)
612 OBD_FREE_PTR(osfs);
613 lprocfs_unregister_mountpoint(sbi);
614 return err;
615}
616
617int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
618{
619 int size, rc;
620
621 *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
622 size = sizeof(int);
623 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
624 KEY_MAX_EASIZE, &size, lmmsize, NULL);
625 if (rc)
626 CERROR("Get max mdsize error rc %d\n", rc);
627
628 return rc;
629}
630
631int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
632{
633 int size, rc;
634
635 size = sizeof(int);
636 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
637 KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
638 if (rc)
639 CERROR("Get default mdsize error rc %d\n", rc);
640
641 return rc;
642}
643
644int ll_get_max_cookiesize(struct ll_sb_info *sbi, int *lmmsize)
645{
646 int size, rc;
647
648 size = sizeof(int);
649 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_COOKIESIZE),
650 KEY_MAX_COOKIESIZE, &size, lmmsize, NULL);
651 if (rc)
652 CERROR("Get max cookiesize error rc %d\n", rc);
653
654 return rc;
655}
656
657int ll_get_default_cookiesize(struct ll_sb_info *sbi, int *lmmsize)
658{
659 int size, rc;
660
661 size = sizeof(int);
662 rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_COOKIESIZE),
663 KEY_DEFAULT_COOKIESIZE, &size, lmmsize, NULL);
664 if (rc)
665 CERROR("Get default cookiesize error rc %d\n", rc);
666
667 return rc;
668}
669
670static void ll_dump_inode(struct inode *inode)
671{
672 struct ll_d_hlist_node *tmp;
673 int dentry_count = 0;
674
675 LASSERT(inode != NULL);
676
677 ll_d_hlist_for_each(tmp, &inode->i_dentry)
678 dentry_count++;
679
680 CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
681 inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
682 inode->i_mode, atomic_read(&inode->i_count), dentry_count);
683}
684
685void lustre_dump_dentry(struct dentry *dentry, int recur)
686{
687 struct list_head *tmp;
688 int subdirs = 0;
689
690 LASSERT(dentry != NULL);
691
692 list_for_each(tmp, &dentry->d_subdirs)
693 subdirs++;
694
695 CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
696 " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
697 dentry->d_name.len, dentry->d_name.name,
698 dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
699 dentry->d_parent, dentry->d_inode, d_count(dentry),
700 dentry->d_flags, dentry->d_fsdata, subdirs);
701 if (dentry->d_inode != NULL)
702 ll_dump_inode(dentry->d_inode);
703
704 if (recur == 0)
705 return;
706
707 list_for_each(tmp, &dentry->d_subdirs) {
708 struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
709 lustre_dump_dentry(d, recur - 1);
710 }
711}
712
713static void client_common_put_super(struct super_block *sb)
714{
715 struct ll_sb_info *sbi = ll_s2sbi(sb);
716
717#ifdef CONFIG_FS_POSIX_ACL
718 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
719 et_fini(&sbi->ll_et);
720 rct_fini(&sbi->ll_rct);
721 }
722#endif
723
724 ll_close_thread_shutdown(sbi->ll_lcq);
725
726 cl_sb_fini(sb);
727
728 list_del(&sbi->ll_conn_chain);
729
730 obd_fid_fini(sbi->ll_dt_exp->exp_obd);
731 obd_disconnect(sbi->ll_dt_exp);
732 sbi->ll_dt_exp = NULL;
733 /* wait till all OSCs are gone, since cl_cache is accessing sbi.
734 * see LU-2543. */
735 obd_zombie_barrier();
736
737 lprocfs_unregister_mountpoint(sbi);
738
739 obd_fid_fini(sbi->ll_md_exp->exp_obd);
740 obd_disconnect(sbi->ll_md_exp);
741 sbi->ll_md_exp = NULL;
742}
743
744void ll_kill_super(struct super_block *sb)
745{
746 struct ll_sb_info *sbi;
747
748 /* sb not initialized? */
749 if (!(sb->s_flags & MS_ACTIVE))
750 return;
751
752 sbi = ll_s2sbi(sb);
753 /* Restore s_dev (it was changed for clustered NFS) before put_super,
754 * because new kernels cache s_dev, and changing sb->s_dev in put_super
755 * does not affect the real device removal. */
756 if (sbi) {
757 sb->s_dev = sbi->ll_sdev_orig;
758 sbi->ll_umounting = 1;
759 }
760}
761
762static inline int ll_set_opt(const char *opt, char *data, int fl)
763{
764 if (strncmp(opt, data, strlen(opt)) != 0)
765 return 0;
766 else
767 return fl;
768}
769
770/* non-client-specific mount options are parsed in lmd_parse */
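/* Each recognized keyword ORs its LL_SBI_* bit into *flags (the "no"
 * variants clear it); an unrecognized option aborts the mount with -EINVAL. */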
771static int ll_options(char *options, int *flags)
772{
773 int tmp;
774 char *s1 = options, *s2;
775
776 if (!options)
777 return 0;
778
779 CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
780
781 while (*s1) {
782 CDEBUG(D_SUPER, "next opt=%s\n", s1);
783 tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
784 if (tmp) {
785 *flags |= tmp;
786 goto next;
787 }
788 tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
789 if (tmp) {
790 *flags |= tmp;
791 goto next;
792 }
793 tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
794 if (tmp) {
795 *flags |= tmp;
796 goto next;
797 }
798 tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
799 if (tmp) {
800 *flags &= ~tmp;
801 goto next;
802 }
803 tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
804 if (tmp) {
805 *flags |= tmp;
806 goto next;
807 }
808 tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
809 if (tmp) {
810 *flags &= ~tmp;
811 goto next;
812 }
813#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 5, 50, 0)
814 tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
815 if (tmp) {
816 /* Ignore deprecated mount option. The client will
817 * always try to mount with ACL support, whether this
818 * is used depends on whether server supports it. */
819 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
820 "mount option 'acl'.\n");
821 goto next;
822 }
823 tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
824 if (tmp) {
825 LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
826 "mount option 'noacl'.\n");
827 goto next;
828 }
829#else
830#warning "{no}acl options have been deprecated since 1.8, please remove them"
831#endif
832 tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
833 if (tmp) {
834 *flags |= tmp;
835 goto next;
836 }
837 tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
838 if (tmp) {
839 *flags |= tmp;
840 goto next;
841 }
842 tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
843 if (tmp) {
844 *flags &= ~tmp;
845 goto next;
846 }
847
848 tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
849 if (tmp) {
850 *flags |= tmp;
851 goto next;
852 }
853 tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
854 if (tmp) {
855 *flags &= ~tmp;
856 goto next;
857 }
858 tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
859 if (tmp) {
860 *flags |= tmp;
861 goto next;
862 }
863 tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
864 if (tmp) {
865 *flags &= ~tmp;
866 goto next;
867 }
868 tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
869 if (tmp) {
870 *flags |= tmp;
871 goto next;
872 }
873 tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
874 if (tmp) {
875 *flags &= ~tmp;
876 goto next;
877 }
878 tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
879 if (tmp) {
880 *flags |= tmp;
881 goto next;
882 }
883 tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
884 if (tmp) {
885 *flags |= tmp;
886 goto next;
887 }
888 tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
889 if (tmp) {
890 *flags |= tmp;
891 goto next;
892 }
893 tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
894 if (tmp) {
895 *flags &= ~tmp;
896 goto next;
897 }
898 LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
899 s1);
900 return -EINVAL;
901
902next:
903 /* Find next opt */
904 s2 = strchr(s1, ',');
905 if (s2 == NULL)
906 break;
907 s1 = s2 + 1;
908 }
909 return 0;
910}
911
912void ll_lli_init(struct ll_inode_info *lli)
913{
914 lli->lli_inode_magic = LLI_INODE_MAGIC;
915 lli->lli_flags = 0;
916 lli->lli_ioepoch = 0;
917 lli->lli_maxbytes = MAX_LFS_FILESIZE;
918 spin_lock_init(&lli->lli_lock);
919 lli->lli_posix_acl = NULL;
920 lli->lli_remote_perms = NULL;
921 mutex_init(&lli->lli_rmtperm_mutex);
922 /* Do not set lli_fid, it has been initialized already. */
923 fid_zero(&lli->lli_pfid);
924 INIT_LIST_HEAD(&lli->lli_close_list);
925 INIT_LIST_HEAD(&lli->lli_oss_capas);
926 atomic_set(&lli->lli_open_count, 0);
927 lli->lli_mds_capa = NULL;
928 lli->lli_rmtperm_time = 0;
929 lli->lli_pending_och = NULL;
930 lli->lli_mds_read_och = NULL;
931 lli->lli_mds_write_och = NULL;
932 lli->lli_mds_exec_och = NULL;
933 lli->lli_open_fd_read_count = 0;
934 lli->lli_open_fd_write_count = 0;
935 lli->lli_open_fd_exec_count = 0;
936 mutex_init(&lli->lli_och_mutex);
937 spin_lock_init(&lli->lli_agl_lock);
938 lli->lli_has_smd = false;
939 spin_lock_init(&lli->lli_layout_lock);
940 ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
941 lli->lli_clob = NULL;
942
943 init_rwsem(&lli->lli_xattrs_list_rwsem);
944 mutex_init(&lli->lli_xattrs_enq_lock);
945
946 LASSERT(lli->lli_vfs_inode.i_mode != 0);
947 if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
948 mutex_init(&lli->lli_readdir_mutex);
949 lli->lli_opendir_key = NULL;
950 lli->lli_sai = NULL;
951 spin_lock_init(&lli->lli_sa_lock);
952 lli->lli_opendir_pid = 0;
953 } else {
954 mutex_init(&lli->lli_size_mutex);
955 lli->lli_symlink_name = NULL;
956 init_rwsem(&lli->lli_trunc_sem);
957 mutex_init(&lli->lli_write_mutex);
958 init_rwsem(&lli->lli_glimpse_sem);
959 lli->lli_glimpse_time = 0;
960 INIT_LIST_HEAD(&lli->lli_agl_list);
961 lli->lli_agl_index = 0;
962 lli->lli_async_rc = 0;
963 }
964 mutex_init(&lli->lli_layout_mutex);
965}
966
967static inline int ll_bdi_register(struct backing_dev_info *bdi)
968{
969 static atomic_t ll_bdi_num = ATOMIC_INIT(0);
970
971 bdi->name = "lustre";
972 return bdi_register(bdi, NULL, "lustre-%d",
973 atomic_inc_return(&ll_bdi_num));
974}
975
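/* Fill the VFS super block for a client mount: parse the lustre-specific
 * mount options, set up the backing_dev_info, process the MGS config log
 * for this profile (which sets up the md/dt obds named "<target>-<instance>"),
 * then hand off to client_common_fill_super(). */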
976int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
977{
978 struct lustre_profile *lprof = NULL;
979 struct lustre_sb_info *lsi = s2lsi(sb);
980 struct ll_sb_info *sbi;
981 char *dt = NULL, *md = NULL;
982 char *profilenm = get_profile_name(sb);
983 struct config_llog_instance *cfg;
984 /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
985 const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
986 int err;
987
988 CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
989
990 OBD_ALLOC_PTR(cfg);
991 if (cfg == NULL)
992 return -ENOMEM;
993
994 try_module_get(THIS_MODULE);
995
996 /* client additional sb info */
997 lsi->lsi_llsbi = sbi = ll_init_sbi();
998 if (!sbi) {
999 module_put(THIS_MODULE);
1000 OBD_FREE_PTR(cfg);
1001 return -ENOMEM;
1002 }
1003
1004 err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
1005 if (err)
1006 GOTO(out_free, err);
1007
1008 err = bdi_init(&lsi->lsi_bdi);
1009 if (err)
1010 GOTO(out_free, err);
1011 lsi->lsi_flags |= LSI_BDI_INITIALIZED;
1012 lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
1013 err = ll_bdi_register(&lsi->lsi_bdi);
1014 if (err)
1015 GOTO(out_free, err);
1016
1017 sb->s_bdi = &lsi->lsi_bdi;
1018 /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
1019 sb->s_d_op = &ll_d_ops;
1020
1021 /* Generate a string unique to this super, in case some joker tries
1022 to mount the same fs at two mount points.
1023 Use the address of the super itself.*/
1024 cfg->cfg_instance = sb;
1025 cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
1026 cfg->cfg_callback = class_config_llog_handler;
1027 /* set up client obds */
1028 err = lustre_process_log(sb, profilenm, cfg);
1029 if (err < 0) {
1030 CERROR("Unable to process log: %d\n", err);
1031 GOTO(out_free, err);
1032 }
1033
1034 /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
1035 lprof = class_get_profile(profilenm);
1036 if (lprof == NULL) {
1037 LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
1038 " read from the MGS. Does that filesystem "
1039 "exist?\n", profilenm);
1040 GOTO(out_free, err = -EINVAL);
1041 }
1042 CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
1043 lprof->lp_md, lprof->lp_dt);
1044
1045 OBD_ALLOC(dt, strlen(lprof->lp_dt) + instlen + 2);
1046 if (!dt)
1047 GOTO(out_free, err = -ENOMEM);
1048 sprintf(dt, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
1049
1050 OBD_ALLOC(md, strlen(lprof->lp_md) + instlen + 2);
1051 if (!md)
1052 GOTO(out_free, err = -ENOMEM);
1053 sprintf(md, "%s-%p", lprof->lp_md, cfg->cfg_instance);
1054
1055 /* connections, registrations, sb setup */
1056 err = client_common_fill_super(sb, md, dt, mnt);
1057
1058out_free:
1059 if (md)
1060 OBD_FREE(md, strlen(lprof->lp_md) + instlen + 2);
1061 if (dt)
1062 OBD_FREE(dt, strlen(lprof->lp_dt) + instlen + 2);
1063 if (err)
1064 ll_put_super(sb);
1065 else if (sbi->ll_flags & LL_SBI_VERBOSE)
1066 LCONSOLE_WARN("Mounted %s\n", profilenm);
1067
1068 OBD_FREE_PTR(cfg);
1069 return err;
1070} /* ll_fill_super */
1071
1072void ll_put_super(struct super_block *sb)
1073{
1074 struct config_llog_instance cfg, params_cfg;
1075 struct obd_device *obd;
1076 struct lustre_sb_info *lsi = s2lsi(sb);
1077 struct ll_sb_info *sbi = ll_s2sbi(sb);
1078 char *profilenm = get_profile_name(sb);
1079 int next, force = 1;
1080
1081 CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
1082
1083 ll_print_capa_stat(sbi);
1084
1085 cfg.cfg_instance = sb;
1086 lustre_end_log(sb, profilenm, &cfg);
1087
1088 params_cfg.cfg_instance = sb;
1089 lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);
1090
1091 if (sbi->ll_md_exp) {
1092 obd = class_exp2obd(sbi->ll_md_exp);
1093 if (obd)
1094 force = obd->obd_force;
1095 }
1096
1097 /* We need to set force before the lov_disconnect in
1098 lustre_common_put_super, since l_d cleans up osc's as well. */
1099 if (force) {
1100 next = 0;
1101 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
1102 &next)) != NULL) {
1103 obd->obd_force = force;
1104 }
1105 }
1106
1107 if (sbi->ll_lcq) {
1108 /* Only if client_common_fill_super succeeded */
1109 client_common_put_super(sb);
1110 }
1111
1112 next = 0;
1113 while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) != NULL) {
1114 class_manual_cleanup(obd);
1115 }
1116
1117 if (sbi->ll_flags & LL_SBI_VERBOSE)
1118 LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
1119
1120 if (profilenm)
1121 class_del_profile(profilenm);
1122
1123 if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
1124 bdi_destroy(&lsi->lsi_bdi);
1125 lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
1126 }
1127
1128 ll_free_sbi(sb);
1129 lsi->lsi_llsbi = NULL;
1130
1131 lustre_common_put_super(sb);
1132
1133 module_put(THIS_MODULE);
1134} /* client_put_super */
1135
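/* Return the inode backing a DLM lock's resource, taking a reference with
 * igrab(); returns NULL (with a debug/warning message) if lr_lvb_inode is
 * unset or its magic does not match LLI_INODE_MAGIC. */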
1136struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
1137{
1138 struct inode *inode = NULL;
1139
1140 /* NOTE: we depend on atomic igrab() -bzzz */
1141 lock_res_and_lock(lock);
1142 if (lock->l_resource->lr_lvb_inode) {
1143 struct ll_inode_info *lli;
1144 lli = ll_i2info(lock->l_resource->lr_lvb_inode);
1145 if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
1146 inode = igrab(lock->l_resource->lr_lvb_inode);
1147 } else {
1148 inode = lock->l_resource->lr_lvb_inode;
1149 LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
1150 D_WARNING, lock, "lr_lvb_inode %p is "
1151 "bogus: magic %08x",
1152 lock->l_resource->lr_lvb_inode,
1153 lli->lli_inode_magic);
1154 inode = NULL;
1155 }
1156 }
1157 unlock_res_and_lock(lock);
1158 return inode;
1159}
1160
1161void ll_clear_inode(struct inode *inode)
1162{
1163 struct ll_inode_info *lli = ll_i2info(inode);
1164 struct ll_sb_info *sbi = ll_i2sbi(inode);
1165
1166 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
1167 inode->i_generation, inode);
1168
1169 if (S_ISDIR(inode->i_mode)) {
1170 /* these should have been cleared in ll_file_release */
1171 LASSERT(lli->lli_opendir_key == NULL);
1172 LASSERT(lli->lli_sai == NULL);
1173 LASSERT(lli->lli_opendir_pid == 0);
1174 }
1175
1176 spin_lock(&lli->lli_lock);
1177 ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
1178 spin_unlock(&lli->lli_lock);
1179 md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
1180
1181 LASSERT(!lli->lli_open_fd_write_count);
1182 LASSERT(!lli->lli_open_fd_read_count);
1183 LASSERT(!lli->lli_open_fd_exec_count);
1184
1185 if (lli->lli_mds_write_och)
1186 ll_md_real_close(inode, FMODE_WRITE);
1187 if (lli->lli_mds_exec_och)
1188 ll_md_real_close(inode, FMODE_EXEC);
1189 if (lli->lli_mds_read_och)
1190 ll_md_real_close(inode, FMODE_READ);
1191
1192 if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
1193 OBD_FREE(lli->lli_symlink_name,
1194 strlen(lli->lli_symlink_name) + 1);
1195 lli->lli_symlink_name = NULL;
1196 }
1197
1198 ll_xattr_cache_destroy(inode);
1199
1200 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1201 LASSERT(lli->lli_posix_acl == NULL);
1202 if (lli->lli_remote_perms) {
1203 free_rmtperm_hash(lli->lli_remote_perms);
1204 lli->lli_remote_perms = NULL;
1205 }
1206 }
1207#ifdef CONFIG_FS_POSIX_ACL
1208 else if (lli->lli_posix_acl) {
1209 LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
1210 LASSERT(lli->lli_remote_perms == NULL);
1211 posix_acl_release(lli->lli_posix_acl);
1212 lli->lli_posix_acl = NULL;
1213 }
1214#endif
1215 lli->lli_inode_magic = LLI_INODE_DEAD;
1216
1217 ll_clear_inode_capas(inode);
1218 if (!S_ISDIR(inode->i_mode))
1219 LASSERT(list_empty(&lli->lli_agl_list));
1220
1221 /*
1222 * XXX This has to be done before lsm is freed below, because
1223 * cl_object still uses inode lsm.
1224 */
1225 cl_inode_fini(inode);
1226 lli->lli_has_smd = false;
1227}
1228
1229int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
1230 struct md_open_data **mod)
1231{
1232 struct lustre_md md;
1233 struct inode *inode = dentry->d_inode;
1234 struct ll_sb_info *sbi = ll_i2sbi(inode);
1235 struct ptlrpc_request *request = NULL;
1236 int rc, ia_valid;
1237
1238 op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
1239 LUSTRE_OPC_ANY, NULL);
1240 if (IS_ERR(op_data))
1241 return PTR_ERR(op_data);
1242
1243 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
1244 &request, mod);
1245 if (rc) {
1246 ptlrpc_req_finished(request);
1247 if (rc == -ENOENT) {
1248 clear_nlink(inode);
1249 /* Unlinked special device node? Or just a race?
1250 * Pretend we've done everything. */
1251 if (!S_ISREG(inode->i_mode) &&
1252 !S_ISDIR(inode->i_mode)) {
1253 ia_valid = op_data->op_attr.ia_valid;
1254 op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
1255 rc = simple_setattr(dentry, &op_data->op_attr);
1256 op_data->op_attr.ia_valid = ia_valid;
1257 }
1258 } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
1259 CERROR("md_setattr fails: rc = %d\n", rc);
1260 }
1261 return rc;
1262 }
1263
1264 rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
1265 sbi->ll_md_exp, &md);
1266 if (rc) {
1267 ptlrpc_req_finished(request);
1268 return rc;
1269 }
1270
1271 ia_valid = op_data->op_attr.ia_valid;
1272 /* inode size will be in ll_setattr_ost, can't do it now since dirty
1273 * cache is not cleared yet. */
1274 op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
1275 rc = simple_setattr(dentry, &op_data->op_attr);
1276 op_data->op_attr.ia_valid = ia_valid;
1277
1278 /* Extract epoch data if obtained. */
1279 op_data->op_handle = md.body->handle;
1280 op_data->op_ioepoch = md.body->ioepoch;
1281
1282 ll_update_inode(inode, &md);
1283 ptlrpc_req_finished(request);
1284
1285 return rc;
1286}
1287
1288/* Close IO epoch and send Size-on-MDS attribute update. */
1289static int ll_setattr_done_writing(struct inode *inode,
1290 struct md_op_data *op_data,
1291 struct md_open_data *mod)
1292{
1293 struct ll_inode_info *lli = ll_i2info(inode);
1294 int rc = 0;
1295
1296 LASSERT(op_data != NULL);
1297 if (!S_ISREG(inode->i_mode))
1298 return 0;
1299
1300 CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
1301 op_data->op_ioepoch, PFID(&lli->lli_fid));
1302
1303 op_data->op_flags = MF_EPOCH_CLOSE;
1304 ll_done_writing_attr(inode, op_data);
1305 ll_pack_inode2opdata(inode, op_data, NULL);
1306
1307 rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
1308 if (rc == -EAGAIN) {
1309 /* MDS has instructed us to obtain Size-on-MDS attribute
1310 * from OSTs and send setattr to back to MDS. */
1311 rc = ll_som_update(inode, op_data);
1312 } else if (rc) {
1313 CERROR("inode %lu mdc truncate failed: rc = %d\n",
1314 inode->i_ino, rc);
1315 }
1316 return rc;
1317}
1318
1319static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
1320{
1321 struct obd_capa *capa;
1322 int rc;
1323
1324 if (attr->ia_valid & ATTR_SIZE)
1325 capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
1326 else
1327 capa = ll_mdscapa_get(inode);
1328
1329 rc = cl_setattr_ost(inode, attr, capa);
1330
1331 if (attr->ia_valid & ATTR_SIZE)
1332 ll_truncate_free_capa(capa);
1333 else
1334 capa_put(capa);
1335
1336 return rc;
1337}
1338
1339
1340/* If this inode has objects allocated to it (lsm != NULL), then the OST
1341 * object(s) determine the file size and mtime. Otherwise, the MDS will
1342 * keep these values until such a time that objects are allocated for it.
1343 * We do the MDS operations first, as it is checking permissions for us.
1344 * We don't to the MDS RPC if there is nothing that we want to store there,
1345 * otherwise there is no harm in updating mtime/atime on the MDS if we are
1346 * going to do an RPC anyways.
1347 *
1348 * If we are doing a truncate, we will send the mtime and ctime updates
1349 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
1350 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
1351 * at the same time.
1352 *
1353 * In case of HSMimport, we only set attr on MDS.
1354 */
1355int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
1356{
1357 struct inode *inode = dentry->d_inode;
1358 struct ll_inode_info *lli = ll_i2info(inode);
1359 struct md_op_data *op_data = NULL;
1360 struct md_open_data *mod = NULL;
1361 bool file_is_released = false;
1362 int rc = 0, rc1 = 0;
1363
1364 CDEBUG(D_VFSTRACE,
1365 "%s: setattr inode %p/fid:"DFID
1366 " from %llu to %llu, valid %x, hsm_import %d\n",
1367 ll_get_fsname(inode->i_sb, NULL, 0), inode,
1368 PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
1369 attr->ia_valid, hsm_import);
1370
1371 if (attr->ia_valid & ATTR_SIZE) {
1372 /* Check new size against VFS/VM file size limit and rlimit */
1373 rc = inode_newsize_ok(inode, attr->ia_size);
1374 if (rc)
1375 return rc;
1376
1377 /* The maximum Lustre file size is variable, based on the
1378 * OST maximum object size and number of stripes. This
1379 * needs another check in addition to the VFS check above. */
1380 if (attr->ia_size > ll_file_maxbytes(inode)) {
1381 CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
1382 PFID(&lli->lli_fid), attr->ia_size,
1383 ll_file_maxbytes(inode));
1384 return -EFBIG;
1385 }
1386
1387 attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
1388 }
1389
1390 /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
1391 if (attr->ia_valid & TIMES_SET_FLAGS) {
1392 if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
1393 !capable(CFS_CAP_FOWNER))
1394 return -EPERM;
1395 }
1396
1397 /* We mark all of the fields "set" so MDS/OST does not re-set them */
1398 if (attr->ia_valid & ATTR_CTIME) {
1399 attr->ia_ctime = CURRENT_TIME;
1400 attr->ia_valid |= ATTR_CTIME_SET;
1401 }
1402 if (!(attr->ia_valid & ATTR_ATIME_SET) &&
1403 (attr->ia_valid & ATTR_ATIME)) {
1404 attr->ia_atime = CURRENT_TIME;
1405 attr->ia_valid |= ATTR_ATIME_SET;
1406 }
1407 if (!(attr->ia_valid & ATTR_MTIME_SET) &&
1408 (attr->ia_valid & ATTR_MTIME)) {
1409 attr->ia_mtime = CURRENT_TIME;
1410 attr->ia_valid |= ATTR_MTIME_SET;
1411 }
1412
1413 if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
1414 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
1415 LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
1416 get_seconds());
1417
1418 /* If we are changing file size, file content is modified, flag it. */
1419 if (attr->ia_valid & ATTR_SIZE) {
1420 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1421 spin_lock(&lli->lli_lock);
1422 lli->lli_flags |= LLIF_DATA_MODIFIED;
1423 spin_unlock(&lli->lli_lock);
1424 }
1425
1426 /* We always do an MDS RPC, even if we're only changing the size;
1427 * only the MDS knows whether truncate() should fail with -ETXTBUSY */
1428
1429 OBD_ALLOC_PTR(op_data);
1430 if (op_data == NULL)
1431 return -ENOMEM;
1432
1433 if (!S_ISDIR(inode->i_mode)) {
1434 if (attr->ia_valid & ATTR_SIZE)
1435 inode_dio_write_done(inode);
1436 mutex_unlock(&inode->i_mutex);
1437 }
1438
1439 memcpy(&op_data->op_attr, attr, sizeof(*attr));
1440
1441 /* Open epoch for truncate. */
1442 if (exp_connect_som(ll_i2mdexp(inode)) &&
1443 (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
1444 op_data->op_flags = MF_EPOCH_OPEN;
1445
1446 /* truncate on a released file must fail with -ENODATA,
1447 * so size must not be set on MDS for released file
1448 * but other attributes must be set
1449 */
1450 if (S_ISREG(inode->i_mode)) {
1451 struct lov_stripe_md *lsm;
1452 __u32 gen;
1453
1454 ll_layout_refresh(inode, &gen);
1455 lsm = ccc_inode_lsm_get(inode);
1456 if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
1457 file_is_released = true;
1458 ccc_inode_lsm_put(inode, lsm);
1459 }
1460
1461 /* if not in HSM import mode, clear size attr for released file
1462 * we clear the attribute send to MDT in op_data, not the original
1463 * received from caller in attr which is used later to
1464 * decide return code */
1465 if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
1466 op_data->op_attr.ia_valid &= ~ATTR_SIZE;
1467
1468 rc = ll_md_setattr(dentry, op_data, &mod);
1469 if (rc)
1470 GOTO(out, rc);
1471
1472 /* truncate failed (only when not in HSM import mode), others succeed */
1473 if (file_is_released) {
1474 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
1475 GOTO(out, rc = -ENODATA);
1476 else
1477 GOTO(out, rc = 0);
1478 }
1479
1480 /* RPC to MDT is sent, cancel data modification flag */
1481 if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
1482 spin_lock(&lli->lli_lock);
1483 lli->lli_flags &= ~LLIF_DATA_MODIFIED;
1484 spin_unlock(&lli->lli_lock);
1485 }
1486
1487 ll_ioepoch_open(lli, op_data->op_ioepoch);
1488 if (!S_ISREG(inode->i_mode))
1489 GOTO(out, rc = 0);
1490
1491 if (attr->ia_valid & (ATTR_SIZE |
1492 ATTR_ATIME | ATTR_ATIME_SET |
1493 ATTR_MTIME | ATTR_MTIME_SET))
1494 /* For truncate and utimes sending attributes to OSTs, setting
1495 * mtime/atime to the past will be performed under PW [0:EOF]
1496 * extent lock (new_size:EOF for truncate). It may seem
1497 * excessive to send mtime/atime updates to OSTs when not
1498 * setting times to past, but it is necessary due to possible
1499 * time de-synchronization between MDT inode and OST objects */
1500 if (attr->ia_valid & ATTR_SIZE)
1501 down_write(&lli->lli_trunc_sem);
1502 rc = ll_setattr_ost(inode, attr);
1503 if (attr->ia_valid & ATTR_SIZE)
1504 up_write(&lli->lli_trunc_sem);
1505out:
1506 if (op_data) {
1507 if (op_data->op_ioepoch) {
1508 rc1 = ll_setattr_done_writing(inode, op_data, mod);
1509 if (!rc)
1510 rc = rc1;
1511 }
1512 ll_finish_md_op_data(op_data);
1513 }
1514 if (!S_ISDIR(inode->i_mode)) {
1515 mutex_lock(&inode->i_mutex);
1516 if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
1517 inode_dio_wait(inode);
1518 }
1519
1520 ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
1521 LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
1522
1523 return rc;
1524}
1525
1526int ll_setattr(struct dentry *de, struct iattr *attr)
1527{
1528 int mode = de->d_inode->i_mode;
1529
1530 if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
1531 (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
1532 attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
1533
1534 if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
1535 (ATTR_SIZE|ATTR_MODE)) &&
1536 (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
1537 (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1538 !(attr->ia_mode & S_ISGID))))
1539 attr->ia_valid |= ATTR_FORCE;
1540
1541 if ((attr->ia_valid & ATTR_MODE) &&
1542 (mode & S_ISUID) &&
1543 !(attr->ia_mode & S_ISUID) &&
1544 !(attr->ia_valid & ATTR_KILL_SUID))
1545 attr->ia_valid |= ATTR_KILL_SUID;
1546
1547 if ((attr->ia_valid & ATTR_MODE) &&
1548 ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
1549 !(attr->ia_mode & S_ISGID) &&
1550 !(attr->ia_valid & ATTR_KILL_SGID))
1551 attr->ia_valid |= ATTR_KILL_SGID;
1552
1553 return ll_setattr_raw(de, attr, false);
1554}
1555
1556int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1557 __u64 max_age, __u32 flags)
1558{
1559 struct ll_sb_info *sbi = ll_s2sbi(sb);
1560 struct obd_statfs obd_osfs;
1561 int rc;
1562
1563 rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1564 if (rc) {
1565 CERROR("md_statfs fails: rc = %d\n", rc);
1566 return rc;
1567 }
1568
1569 osfs->os_type = sb->s_magic;
1570
1571 CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
1572 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
1573 osfs->os_files);
1574
1575 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1576 flags |= OBD_STATFS_NODELAY;
1577
1578 rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1579 if (rc) {
1580 CERROR("obd_statfs fails: rc = %d\n", rc);
1581 return rc;
1582 }
1583
1584 CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
1585 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1586 obd_osfs.os_files);
1587
1588 osfs->os_bsize = obd_osfs.os_bsize;
1589 osfs->os_blocks = obd_osfs.os_blocks;
1590 osfs->os_bfree = obd_osfs.os_bfree;
1591 osfs->os_bavail = obd_osfs.os_bavail;
1592
1593 /* If we don't have as many objects free on the OST as inodes
1594 * on the MDS, we reduce the total number of inodes to
1595 * compensate, so that the "inodes in use" number is correct.
1596 */
1597 if (obd_osfs.os_ffree < osfs->os_ffree) {
1598 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1599 obd_osfs.os_ffree;
1600 osfs->os_ffree = obd_osfs.os_ffree;
1601 }
1602
1603 return rc;
1604}
1605int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1606{
1607 struct super_block *sb = de->d_sb;
1608 struct obd_statfs osfs;
1609 int rc;
1610
1611 CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
1612 ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1613
1614 /* Some amount of caching on the client is allowed */
1615 rc = ll_statfs_internal(sb, &osfs,
1616 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1617 0);
1618 if (rc)
1619 return rc;
1620
1621 statfs_unpack(sfs, &osfs);
1622
1623 /* We need to downshift for all 32-bit kernels, because we can't
1624 * tell if the kernel is being called via sys_statfs64() or not.
1625 * Stop before overflowing f_bsize - in which case it is better
1626 * to just risk EOVERFLOW if caller is using old sys_statfs(). */
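/* For example, on a 32-bit client 2^33 blocks of 4 KB (32 TB) end up
 * reported as 2^31 blocks of 16 KB, so the counts fit in 32 bits. */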
1627 if (sizeof(long) < 8) {
1628 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1629 sfs->f_bsize <<= 1;
1630
1631 osfs.os_blocks >>= 1;
1632 osfs.os_bfree >>= 1;
1633 osfs.os_bavail >>= 1;
1634 }
1635 }
1636
1637 sfs->f_blocks = osfs.os_blocks;
1638 sfs->f_bfree = osfs.os_bfree;
1639 sfs->f_bavail = osfs.os_bavail;
1640 sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
1641 return 0;
1642}
1643
1644void ll_inode_size_lock(struct inode *inode)
1645{
1646 struct ll_inode_info *lli;
1647
1648 LASSERT(!S_ISDIR(inode->i_mode));
1649
1650 lli = ll_i2info(inode);
1651 mutex_lock(&lli->lli_size_mutex);
1652}
1653
1654void ll_inode_size_unlock(struct inode *inode)
1655{
1656 struct ll_inode_info *lli;
1657
1658 lli = ll_i2info(inode);
1659 mutex_unlock(&lli->lli_size_mutex);
1660}
1661
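/* Copy the attributes returned by the MDS (and the stripe md, if any) into
 * the VFS inode, honouring the OBD_MD_* valid bits; the MDS-provided size
 * is applied with extra care when Size-on-MDS (SOM) is in use. */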
1662void ll_update_inode(struct inode *inode, struct lustre_md *md)
1663{
1664 struct ll_inode_info *lli = ll_i2info(inode);
1665 struct mdt_body *body = md->body;
1666 struct lov_stripe_md *lsm = md->lsm;
1667 struct ll_sb_info *sbi = ll_i2sbi(inode);
1668
1669 LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
1670 if (lsm != NULL) {
1671 if (!lli->lli_has_smd &&
1672 !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
1673 cl_file_inode_init(inode, md);
1674
1675 lli->lli_maxbytes = lsm->lsm_maxbytes;
1676 if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
1677 lli->lli_maxbytes = MAX_LFS_FILESIZE;
1678 }
1679
1680 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
1681 if (body->valid & OBD_MD_FLRMTPERM)
1682 ll_update_remote_perm(inode, md->remote_perm);
1683 }
1684#ifdef CONFIG_FS_POSIX_ACL
1685 else if (body->valid & OBD_MD_FLACL) {
1686 spin_lock(&lli->lli_lock);
1687 if (lli->lli_posix_acl)
1688 posix_acl_release(lli->lli_posix_acl);
1689 lli->lli_posix_acl = md->posix_acl;
1690 spin_unlock(&lli->lli_lock);
1691 }
1692#endif
1693 inode->i_ino = cl_fid_build_ino(&body->fid1,
1694 sbi->ll_flags & LL_SBI_32BIT_API);
1695 inode->i_generation = cl_fid_build_gen(&body->fid1);
1696
1697 if (body->valid & OBD_MD_FLATIME) {
1698 if (body->atime > LTIME_S(inode->i_atime))
1699 LTIME_S(inode->i_atime) = body->atime;
1700 lli->lli_lvb.lvb_atime = body->atime;
1701 }
1702 if (body->valid & OBD_MD_FLMTIME) {
1703 if (body->mtime > LTIME_S(inode->i_mtime)) {
1704 CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
1705 inode->i_ino, LTIME_S(inode->i_mtime),
1706 body->mtime);
1707 LTIME_S(inode->i_mtime) = body->mtime;
1708 }
1709 lli->lli_lvb.lvb_mtime = body->mtime;
1710 }
1711 if (body->valid & OBD_MD_FLCTIME) {
1712 if (body->ctime > LTIME_S(inode->i_ctime))
1713 LTIME_S(inode->i_ctime) = body->ctime;
1714 lli->lli_lvb.lvb_ctime = body->ctime;
1715 }
1716 if (body->valid & OBD_MD_FLMODE)
1717 inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
1718 if (body->valid & OBD_MD_FLTYPE)
1719 inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
1720 LASSERT(inode->i_mode != 0);
1721 if (S_ISREG(inode->i_mode)) {
1722 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
1723 } else {
1724 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
1725 }
1726 if (body->valid & OBD_MD_FLUID)
1727 inode->i_uid = make_kuid(&init_user_ns, body->uid);
1728 if (body->valid & OBD_MD_FLGID)
1729 inode->i_gid = make_kgid(&init_user_ns, body->gid);
1730 if (body->valid & OBD_MD_FLFLAGS)
1731 inode->i_flags = ll_ext_to_inode_flags(body->flags);
1732 if (body->valid & OBD_MD_FLNLINK)
1733 set_nlink(inode, body->nlink);
1734 if (body->valid & OBD_MD_FLRDEV)
1735 inode->i_rdev = old_decode_dev(body->rdev);
1736
1737 if (body->valid & OBD_MD_FLID) {
1738 /* FID shouldn't be changed! */
1739 if (fid_is_sane(&lli->lli_fid)) {
1740 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
1741 "Trying to change FID "DFID
1742 " to the "DFID", inode %lu/%u(%p)\n",
1743 PFID(&lli->lli_fid), PFID(&body->fid1),
1744 inode->i_ino, inode->i_generation, inode);
1745 } else
1746 lli->lli_fid = body->fid1;
1747 }
1748
1749 LASSERT(fid_seq(&lli->lli_fid) != 0);
1750
1751 if (body->valid & OBD_MD_FLSIZE) {
1752 if (exp_connect_som(ll_i2mdexp(inode)) &&
1753 S_ISREG(inode->i_mode)) {
1754 struct lustre_handle lockh;
1755 ldlm_mode_t mode;
1756
1757 /* As it is possible a blocking ast has been processed
1758 * by this time, we need to check there is an UPDATE
1759 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
1760 * it. */
1761 mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
7fc1f831
AP
1762 &lockh, LDLM_FL_CBPENDING,
1763 LCK_CR | LCK_CW |
1764 LCK_PR | LCK_PW);
d7e09d03
PT
1765 if (mode) {
1766 if (lli->lli_flags & (LLIF_DONE_WRITING |
1767 LLIF_EPOCH_PENDING |
1768 LLIF_SOM_DIRTY)) {
1769 CERROR("ino %lu flags %u still has "
1770 "size authority! do not trust "
1771 "the size got from MDS\n",
1772 inode->i_ino, lli->lli_flags);
1773 } else {
1774 /* Use old size assignment to avoid
1775 * deadlock bz14138 & bz14326 */
1776 i_size_write(inode, body->size);
ae5ef67b 1777 spin_lock(&lli->lli_lock);
d7e09d03 1778 lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
ae5ef67b 1779 spin_unlock(&lli->lli_lock);
d7e09d03
PT
1780 }
1781 ldlm_lock_decref(&lockh, mode);
1782 }
1783 } else {
1784 /* Use old size assignment to avoid
1785 * deadlock bz14138 & bz14326 */
1786 i_size_write(inode, body->size);
1787
1788 CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
1789 inode->i_ino, (unsigned long long)body->size);
1790 }
1791
1792 if (body->valid & OBD_MD_FLBLOCKS)
1793 inode->i_blocks = body->blocks;
1794 }
1795
1796 if (body->valid & OBD_MD_FLMDSCAPA) {
1797 LASSERT(md->mds_capa);
1798 ll_add_capa(inode, md->mds_capa);
1799 }
1800 if (body->valid & OBD_MD_FLOSSCAPA) {
1801 LASSERT(md->oss_capa);
1802 ll_add_capa(inode, md->oss_capa);
1803 }
5ea17d6c
JL
1804
1805 if (body->valid & OBD_MD_TSTATE) {
1806 if (body->t_state & MS_RESTORE)
1807 lli->lli_flags |= LLIF_FILE_RESTORING;
1808 }
d7e09d03
PT
1809}
1810
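/* Fill in a freshly-allocated VFS inode from the lustre_md unpacked out of an
 * MDS reply, then install the llite inode/file/address-space operations that
 * match the file type. */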
void ll_read_inode2(struct inode *inode, void *opaque)
{
	struct lustre_md *md = opaque;
	struct ll_inode_info *lli = ll_i2info(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(&lli->lli_fid), inode);

	LASSERT(!lli->lli_has_smd);

	/* Core attributes from the MDS first. This is a new inode, and
	 * the VFS doesn't zero times in the core inode so we have to do
	 * it ourselves. They will be overwritten by either MDS or OST
	 * attributes - we just need to make sure they aren't newer. */
	LTIME_S(inode->i_mtime) = 0;
	LTIME_S(inode->i_atime) = 0;
	LTIME_S(inode->i_ctime) = 0;
	inode->i_rdev = 0;
	ll_update_inode(inode, md);

	/* OIDEBUG(inode); */

	/* initializing backing dev info. */
	inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;

	if (S_ISREG(inode->i_mode)) {
		struct ll_sb_info *sbi = ll_i2sbi(inode);

		inode->i_op = &ll_file_inode_operations;
		inode->i_fop = sbi->ll_fop;
		inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &ll_fast_symlink_inode_operations;
	} else {
		inode->i_op = &ll_special_inode_operations;

		init_special_inode(inode, inode->i_mode,
				   inode->i_rdev);
	}
}

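/* Super operation: final inode teardown. Dirty pages are discarded rather
 * than written back before the page cache is truncated, as required by the
 * osc_extent code (LU-1030). */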
void ll_delete_inode(struct inode *inode)
{
	struct cl_inode_info *lli = cl_i2info(inode);

	if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
		/* discard all dirty pages before truncating them, required by
		 * osc_extent implementation at LU-1030. */
		cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
				   CL_FSYNC_DISCARD, 1);

	truncate_inode_pages_final(&inode->i_data);

	/* Workaround for LU-118 */
	if (inode->i_data.nrpages) {
		TREE_READ_LOCK_IRQ(&inode->i_data);
		TREE_READ_UNLOCK_IRQ(&inode->i_data);
		LASSERTF(inode->i_data.nrpages == 0,
			 "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
			 inode->i_ino, inode->i_generation, inode,
			 inode->i_data.nrpages);
	}
	/* Workaround end */

	ll_clear_inode(inode);
	clear_inode(inode);
}

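/* Handle the FSFILT_IOC_GETFLAGS/SETFLAGS ioctls: flags are fetched from or
 * pushed to the MDS, and on SETFLAGS also propagated to the OSTs when the
 * file has stripe objects. */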
int ll_iocontrol(struct inode *inode, struct file *file,
		 unsigned int cmd, unsigned long arg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	int rc, flags = 0;

	switch (cmd) {
	case FSFILT_IOC_GETFLAGS: {
		struct mdt_body *body;
		struct md_op_data *op_data;

		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
					     0, 0, LUSTRE_OPC_ANY,
					     NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		op_data->op_valid = OBD_MD_FLFLAGS;
		rc = md_getattr(sbi->ll_md_exp, op_data, &req);
		ll_finish_md_op_data(op_data);
		if (rc) {
			CERROR("failure %d inode %lu\n", rc, inode->i_ino);
			return -abs(rc);
		}

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);

		flags = body->flags;

		ptlrpc_req_finished(req);

		return put_user(flags, (int *)arg);
	}
	case FSFILT_IOC_SETFLAGS: {
		struct lov_stripe_md *lsm;
		struct obd_info oinfo = { { { 0 } } };
		struct md_op_data *op_data;

		if (get_user(flags, (int *)arg))
			return -EFAULT;

		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
					     LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
		op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
		rc = md_setattr(sbi->ll_md_exp, op_data,
				NULL, 0, NULL, 0, &req, NULL);
		ll_finish_md_op_data(op_data);
		ptlrpc_req_finished(req);
		if (rc)
			return rc;

		inode->i_flags = ll_ext_to_inode_flags(flags);

		lsm = ccc_inode_lsm_get(inode);
		if (!lsm_has_objects(lsm)) {
			ccc_inode_lsm_put(inode, lsm);
			return 0;
		}

		OBDO_ALLOC(oinfo.oi_oa);
		if (!oinfo.oi_oa) {
			ccc_inode_lsm_put(inode, lsm);
			return -ENOMEM;
		}
		oinfo.oi_md = lsm;
		oinfo.oi_oa->o_oi = lsm->lsm_oi;
		oinfo.oi_oa->o_flags = flags;
		oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
				       OBD_MD_FLGROUP;
		oinfo.oi_capa = ll_mdscapa_get(inode);
		obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
		rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
		capa_put(oinfo.oi_capa);
		OBDO_FREE(oinfo.oi_oa);
		ccc_inode_lsm_put(inode, lsm);

		if (rc && rc != -EPERM && rc != -EACCES)
			CERROR("osc_setattr_async fails: rc = %d\n", rc);

		return rc;
	}
	default:
		return -ENOSYS;
	}

	return 0;
}

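/* Ask both the MDC and OSC exports to flush the calling user's security
 * context (KEY_FLUSH_CTX). */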
int ll_flush_ctx(struct inode *inode)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_SEC, "flush context for user %d\n",
	       from_kuid(&init_user_ns, current_uid()));

	obd_set_info_async(NULL, sbi->ll_md_exp,
			   sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
			   0, NULL, NULL);
	obd_set_info_async(NULL, sbi->ll_dt_exp,
			   sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
			   0, NULL, NULL);
	return 0;
}

/* umount -f client means force down, don't save state */
void ll_umount_begin(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_ioctl_data *ioc_data;

	CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
	       sb->s_count, atomic_read(&sb->s_active));

	obd = class_exp2obd(sbi->ll_md_exp);
	if (obd == NULL) {
		CERROR("Invalid MDC connection handle %#llx\n",
		       sbi->ll_md_exp->exp_handle.h_cookie);
		return;
	}
	obd->obd_force = 1;

	obd = class_exp2obd(sbi->ll_dt_exp);
	if (obd == NULL) {
		CERROR("Invalid LOV connection handle %#llx\n",
		       sbi->ll_dt_exp->exp_handle.h_cookie);
		return;
	}
	obd->obd_force = 1;

	OBD_ALLOC_PTR(ioc_data);
	if (ioc_data) {
		obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
			      sizeof(*ioc_data), ioc_data, NULL);

		obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
			      sizeof(*ioc_data), ioc_data, NULL);

		OBD_FREE_PTR(ioc_data);
	}

	/* Really, we'd like to wait until there are no requests outstanding,
	 * and then continue. For now, we just invalidate the requests,
	 * schedule() and sleep one second if needed, and hope.
	 */
	schedule();
}

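/* Handle remount: only the read-only flag is renegotiated with the MDS;
 * other mount options are left untouched. */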
int ll_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	int err;
	__u32 read_only;

	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
		read_only = *flags & MS_RDONLY;
		err = obd_set_info_async(NULL, sbi->ll_md_exp,
					 sizeof(KEY_READ_ONLY),
					 KEY_READ_ONLY, sizeof(read_only),
					 &read_only, NULL);
		if (err) {
			LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
				      profilenm, read_only ?
				      "read-only" : "read-write", err);
			return err;
		}

		if (read_only)
			sb->s_flags |= MS_RDONLY;
		else
			sb->s_flags &= ~MS_RDONLY;

		if (sbi->ll_flags & LL_SBI_VERBOSE)
			LCONSOLE_WARN("Remounted %s %s\n", profilenm,
				      read_only ? "read-only" : "read-write");
	}
	return 0;
}

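/* Unpack the lustre_md from an MDS reply and either update an existing inode
 * or instantiate a new one via ll_iget(). A layout lock piggybacked on the
 * lookup intent, if any, is applied to the inode as well. */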
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
		  struct super_block *sb, struct lookup_intent *it)
{
	struct ll_sb_info *sbi = NULL;
	struct lustre_md md;
	int rc;

	LASSERT(*inode || sb);
	sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
	rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc)
		return rc;

	if (*inode) {
		ll_update_inode(*inode, &md);
	} else {
		LASSERT(sb != NULL);

		/*
		 * At this point the server returns to the client the same FID
		 * the client generated when creating, so using ->fid1 is okay
		 * here.
		 */
		LASSERT(fid_is_sane(&md.body->fid1));

		*inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
					     sbi->ll_flags & LL_SBI_32BIT_API),
				 &md);
		if (*inode == NULL || IS_ERR(*inode)) {
#ifdef CONFIG_FS_POSIX_ACL
			if (md.posix_acl) {
				posix_acl_release(md.posix_acl);
				md.posix_acl = NULL;
			}
#endif
			rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
			*inode = NULL;
			CERROR("new_inode -fatal: rc %d\n", rc);
			GOTO(out, rc);
		}
	}

	/* Handling piggyback layout lock.
	 * Layout lock can be piggybacked by getattr and open request.
	 * The lsm can be applied to inode only if it comes with a layout lock
	 * otherwise correct layout may be overwritten, for example:
	 * 1. proc1: mdt returns a lsm but not granting layout
	 * 2. layout was changed by another client
	 * 3. proc2: refresh layout and layout lock granted
	 * 4. proc1: to apply a stale layout */
	if (it != NULL && it->d.lustre.it_lock_mode != 0) {
		struct lustre_handle lockh;
		struct ldlm_lock *lock;

		lockh.cookie = it->d.lustre.it_lock_handle;
		lock = ldlm_handle2lock(&lockh);
		LASSERT(lock != NULL);
		if (ldlm_has_layout(lock)) {
			struct cl_object_conf conf;

			memset(&conf, 0, sizeof(conf));
			conf.coc_opc = OBJECT_CONF_SET;
			conf.coc_inode = *inode;
			conf.coc_lock = lock;
			conf.u.coc_md = &md;
			(void)ll_layout_conf(*inode, &conf);
		}
		LDLM_LOCK_PUT(lock);
	}

out:
	if (md.lsm != NULL)
		obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
	md_free_lustre_md(sbi->ll_md_exp, &md);
	return rc;
}

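/* IOC_OBD_STATFS handler: validate the user-supplied obd_ioctl_data and
 * forward the statfs request to either the LMV (metadata) or LOV (data)
 * export. */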
int ll_obd_statfs(struct inode *inode, void *arg)
{
	struct ll_sb_info *sbi = NULL;
	struct obd_export *exp;
	char *buf = NULL;
	struct obd_ioctl_data *data = NULL;
	__u32 type;
	__u32 flags;
	int len = 0, rc;

	if (!inode || !(sbi = ll_i2sbi(inode)))
		GOTO(out_statfs, rc = -EINVAL);

	rc = obd_ioctl_getdata(&buf, &len, arg);
	if (rc)
		GOTO(out_statfs, rc);

	data = (void *)buf;
	if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
	    !data->ioc_pbuf1 || !data->ioc_pbuf2)
		GOTO(out_statfs, rc = -EINVAL);

	if (data->ioc_inllen1 != sizeof(__u32) ||
	    data->ioc_inllen2 != sizeof(__u32) ||
	    data->ioc_plen1 != sizeof(struct obd_statfs) ||
	    data->ioc_plen2 != sizeof(struct obd_uuid))
		GOTO(out_statfs, rc = -EINVAL);

	memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
	if (type & LL_STATFS_LMV)
		exp = sbi->ll_md_exp;
	else if (type & LL_STATFS_LOV)
		exp = sbi->ll_dt_exp;
	else
		GOTO(out_statfs, rc = -ENODEV);

	flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
	rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
	if (rc)
		GOTO(out_statfs, rc);
out_statfs:
	if (buf)
		obd_ioctl_freedata(buf, len);
	return rc;
}

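/* Apply a PARAM_LLITE configuration record; the target superblock is decoded
 * from the instance name carried in the lustre_cfg. */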
int ll_process_config(struct lustre_cfg *lcfg)
{
	char *ptr;
	void *sb;
	struct lprocfs_static_vars lvars;
	unsigned long x;
	int rc = 0;

	lprocfs_llite_init_vars(&lvars);

	/* The instance name contains the sb: lustre-client-aacfe000 */
	ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
	if (!ptr || !*(++ptr))
		return -EINVAL;
	if (sscanf(ptr, "%lx", &x) != 1)
		return -EINVAL;
	sb = (void *)x;
	/* This better be a real Lustre superblock! */
	LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);

	/* Note we have not called client_common_fill_super yet, so
	 * proc fns must be able to handle that! */
	rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
				      lcfg, sb);
	if (rc > 0)
		rc = 0;
	return rc;
}

/* This function prepares the md_op_data hint for passing down to the MD
 * stack. */
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
				       struct inode *i1, struct inode *i2,
				       const char *name, int namelen,
				       int mode, __u32 opc, void *data)
{
	LASSERT(i1 != NULL);

	if (namelen > ll_i2sbi(i1)->ll_namelen)
		return ERR_PTR(-ENAMETOOLONG);

	if (op_data == NULL)
		OBD_ALLOC_PTR(op_data);

	if (op_data == NULL)
		return ERR_PTR(-ENOMEM);

	ll_i2gids(op_data->op_suppgids, i1, i2);
	op_data->op_fid1 = *ll_inode2fid(i1);
	op_data->op_capa1 = ll_mdscapa_get(i1);

	if (i2) {
		op_data->op_fid2 = *ll_inode2fid(i2);
		op_data->op_capa2 = ll_mdscapa_get(i2);
	} else {
		fid_zero(&op_data->op_fid2);
		op_data->op_capa2 = NULL;
	}

	op_data->op_name = name;
	op_data->op_namelen = namelen;
	op_data->op_mode = mode;
	op_data->op_mod_time = get_seconds();
	op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
	op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
	op_data->op_cap = cfs_curproc_cap_pack();
	op_data->op_bias = 0;
	op_data->op_cli_flags = 0;
	if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
	    filename_is_volatile(name, namelen, NULL))
		op_data->op_bias |= MDS_CREATE_VOLATILE;
	op_data->op_opc = opc;
	op_data->op_mds = 0;
	op_data->op_data = data;

	/* If the file is being opened after mknod() (normally due to NFS)
	 * try to use the default stripe data from parent directory for
	 * allocating OST objects. Try to pass the parent FID to MDS. */
	if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
	    !ll_i2info(i2)->lli_has_smd) {
		struct ll_inode_info *lli = ll_i2info(i2);

		spin_lock(&lli->lli_lock);
		if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
			op_data->op_fid1 = lli->lli_pfid;
		spin_unlock(&lli->lli_lock);
		/* We ignore the parent's capability temporarily. */
	}

	/* When called by ll_setattr_raw, file is i1. */
	if (LLIF_DATA_MODIFIED & ll_i2info(i1)->lli_flags)
		op_data->op_bias |= MDS_DATA_MODIFIED;

	return op_data;
}

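/* Release the capabilities taken in ll_prep_md_op_data() and free the
 * md_op_data itself. */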
void ll_finish_md_op_data(struct md_op_data *op_data)
{
	capa_put(op_data->op_capa1);
	capa_put(op_data->op_capa2);
	OBD_FREE_PTR(op_data);
}

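/* ->show_options hook: emit the llite-specific mount options currently set
 * on this superblock. */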
int ll_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct ll_sb_info *sbi;

	LASSERT((seq != NULL) && (dentry != NULL));
	sbi = ll_s2sbi(dentry->d_sb);

	if (sbi->ll_flags & LL_SBI_NOLCK)
		seq_puts(seq, ",nolock");

	if (sbi->ll_flags & LL_SBI_FLOCK)
		seq_puts(seq, ",flock");

	if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
		seq_puts(seq, ",localflock");

	if (sbi->ll_flags & LL_SBI_USER_XATTR)
		seq_puts(seq, ",user_xattr");

	if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
		seq_puts(seq, ",lazystatfs");

	if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
		seq_puts(seq, ",user_fid2path");

	return 0;
}

/**
 * Get obd name by cmd, and copy out to user space
 */
int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_device *obd;

	if (cmd == OBD_IOC_GETDTNAME)
		obd = class_exp2obd(sbi->ll_dt_exp);
	else if (cmd == OBD_IOC_GETMDNAME)
		obd = class_exp2obd(sbi->ll_md_exp);
	else
		return -EINVAL;

	if (!obd)
		return -ENOENT;

	if (copy_to_user((void *)arg, obd->obd_name,
			 strlen(obd->obd_name) + 1))
		return -EFAULT;

	return 0;
}

/**
 * Get the Lustre file system name by \a sb. If \a buf is provided (non-NULL),
 * the fsname will be returned in this buffer; otherwise, a static buffer will
 * be used to store the fsname and returned to the caller.
 */
char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
{
	static char fsname_static[MTI_NAME_MAXLEN];
	struct lustre_sb_info *lsi = s2lsi(sb);
	char *ptr;
	int len;

	if (buf == NULL) {
		/* this means the caller wants to use a static buffer
		 * and it doesn't care about races. Usually this is
		 * in the error reporting path */
		buf = fsname_static;
		buflen = sizeof(fsname_static);
	}

	len = strlen(lsi->lsi_lmd->lmd_profile);
	ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
	if (ptr && (strcmp(ptr, "-client") == 0))
		len -= 7;

	if (unlikely(len >= buflen))
		len = buflen - 1;
	strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
	buf[len] = '\0';

	return buf;
}

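/* Resolve a dentry to a path string, using the current process's root mount
 * as the vfsmount for d_path(). */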
static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
{
	char *path = NULL;
	struct path p;

	p.dentry = dentry;
	p.mnt = current->fs->root.mnt;
	path_get(&p);
	path = d_path(&p, buf, bufsize);
	path_put(&p);

	return path;
}

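/* Log a D_WARNING message when a dirty page is discarded, identifying the
 * affected file by fsname, device, FID and (when resolvable) path, along
 * with the I/O error code. */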
void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
	char *buf, *path = NULL;
	struct dentry *dentry = NULL;
	struct ccc_object *obj = cl_inode2ccc(page->mapping->host);

	/* this can be called inside spin lock so use GFP_ATOMIC. */
	buf = (char *)__get_free_page(GFP_ATOMIC);
	if (buf != NULL) {
		dentry = d_find_alias(page->mapping->host);
		if (dentry != NULL)
			path = ll_d_path(dentry, buf, PAGE_SIZE);
	}

	CDEBUG(D_WARNING,
	       "%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted (rc %d)\n",
	       ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
	       s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
	       PFID(&obj->cob_header.coh_lu.loh_fid),
	       (path && !IS_ERR(path)) ? path : "", ioret);

	if (dentry != NULL)
		dput(dentry);

	if (buf != NULL)
		free_page((unsigned long)buf);
}