// SPDX-License-Identifier: GPL-2.0
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/llite_lib.c
 *
 * Lustre Light Super operations
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/types.h>
#include <linux/mm.h>

#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_ha.h>
#include <lustre_dlm.h>
#include <lprocfs_status.h>
#include <lustre_disk.h>
#include <uapi/linux/lustre/lustre_param.h>
#include <lustre_log.h>
#include <cl_object.h>
#include <obd_cksum.h>
#include "llite_internal.h"

struct kmem_cache *ll_file_data_slab;
struct dentry *llite_root;
struct kset *llite_kset;

#ifndef log2
#define log2(n) ffz(~(n))
#endif

static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = NULL;
	unsigned long pages;
	unsigned long lru_page_max;
	struct sysinfo si;
	class_uuid_t uuid;
	int i;

	sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
	if (!sbi)
		return NULL;

	spin_lock_init(&sbi->ll_lock);
	mutex_init(&sbi->ll_lco.lco_lock);
	spin_lock_init(&sbi->ll_pp_extent_lock);
	spin_lock_init(&sbi->ll_process_lock);
	sbi->ll_rw_stats_on = 0;

	si_meminfo(&si);
	pages = si.totalram - si.totalhigh;
	lru_page_max = pages / 2;

	sbi->ll_cache = cl_cache_init(lru_page_max);
	if (!sbi->ll_cache) {
		kfree(sbi);
		return NULL;
	}

	sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
						    SBI_DEFAULT_READAHEAD_MAX);
	sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
	sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
		SBI_DEFAULT_READAHEAD_WHOLE_MAX;

	ll_generate_random_uuid(uuid);
	class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
	CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);

	sbi->ll_flags |= LL_SBI_VERBOSE;
	sbi->ll_flags |= LL_SBI_CHECKSUM;

	sbi->ll_flags |= LL_SBI_LRU_RESIZE;
	sbi->ll_flags |= LL_SBI_LAZYSTATFS;

	for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_r_hist.oh_lock);
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_w_hist.oh_lock);
	}

	/* metadata statahead is enabled by default */
	sbi->ll_sa_max = LL_SA_RPC_DEF;
	atomic_set(&sbi->ll_sa_total, 0);
	atomic_set(&sbi->ll_sa_wrong, 0);
	atomic_set(&sbi->ll_sa_running, 0);
	atomic_set(&sbi->ll_agl_total, 0);
	sbi->ll_flags |= LL_SBI_AGL_ENABLED;

	/* root squash */
	sbi->ll_squash.rsi_uid = 0;
	sbi->ll_squash.rsi_gid = 0;
	INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
	init_rwsem(&sbi->ll_squash.rsi_sem);

	sbi->ll_sb = sb;

	return sbi;
}

static void ll_free_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	if (sbi->ll_cache) {
		if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
			cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
		cl_cache_decref(sbi->ll_cache);
		sbi->ll_cache = NULL;
	}

	kfree(sbi);
}

static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
				    struct vfsmount *mnt)
{
	struct inode *root = NULL;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_statfs *osfs = NULL;
	struct ptlrpc_request *request = NULL;
	struct obd_connect_data *data = NULL;
	struct obd_uuid *uuid;
	struct md_op_data *op_data;
	struct lustre_md lmd;
	u64 valid;
	int size, err, checksum;

	obd = class_name2obd(md);
	if (!obd) {
		CERROR("MD %s: not setup or attached\n", md);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;

	osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
	if (!osfs) {
		kfree(data);
		return -ENOMEM;
	}

	/* indicate the features supported by this client */
	data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
				  OBD_CONNECT_ATTRFID |
				  OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK |
				  OBD_CONNECT_PINGLESS |
				  OBD_CONNECT_MAX_EASIZE |
				  OBD_CONNECT_FLOCK_DEAD |
				  OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
				  OBD_CONNECT_OPEN_BY_FID |
				  OBD_CONNECT_DIR_STRIPE |
				  OBD_CONNECT_BULK_MBITS;

	if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
		data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
#ifdef CONFIG_FS_POSIX_ACL
	data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
#endif

	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
		/* flag mdc connection as lightweight, only used for testing
		 * purposes, use with care
		 */
		data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;

	data->ocd_ibits_known = MDS_INODELOCK_FULL;
	data->ocd_version = LUSTRE_VERSION_CODE;

	if (sb_rdonly(sb))
		data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
	if (sbi->ll_flags & LL_SBI_USER_XATTR)
		data->ocd_connect_flags |= OBD_CONNECT_XATTR;

	if (sbi->ll_flags & LL_SBI_FLOCK)
		sbi->ll_fop = &ll_file_operations_flock;
	else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
		sbi->ll_fop = &ll_file_operations;
	else
		sbi->ll_fop = &ll_file_operations_noflock;

	/* always ping even if server suppress_pings */
	if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
		data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;

	data->ocd_brw_size = MD_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
			  data, NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x14f,
				   "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   md);
		goto out;
	} else if (err) {
		CERROR("cannot connect to %s: rc = %d\n", md, err);
		goto out;
	}

	sbi->ll_md_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md;
	}

	/* For the mount we only need fs info from MDT0; in DNE this also
	 * ensures the client can be mounted as long as MDT0 is available
	 */
	err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
			 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
			 OBD_STATFS_FOR_MDT0);
	if (err)
		goto out_md_fid;

	/* This needs to be after statfs to ensure connect has finished.
	 * Note that "data" does NOT contain the valid connect reply.
	 * If connecting to a 1.8 server there will be no LMV device, so
	 * we can access the MDC export directly and exp_connect_flags will
	 * be non-zero, but if accessing an upgraded 2.1 server it will
	 * have the correct flags filled in.
	 * XXX: fill in the LMV exp_connect_flags from MDC(s).
	 */
	valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
	if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
	    valid != CLIENT_CONNECT_MDT_REQD) {
		char *buf;

		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out_md_fid;
		}
		obd_connect_flags2str(buf, PAGE_SIZE,
				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
		LCONSOLE_ERROR_MSG(0x170,
				   "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
				   sbi->ll_md_exp->exp_obd->obd_name, buf);
		kfree(buf);
		err = -EPROTO;
		goto out_md_fid;
	}

	size = sizeof(*data);
	err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
			   KEY_CONN_DATA, &size, data);
	if (err) {
		CERROR("%s: Get connect data failed: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md_fid;
	}

	LASSERT(osfs->os_bsize);
	sb->s_blocksize = osfs->os_bsize;
	sb->s_blocksize_bits = log2(osfs->os_bsize);
	sb->s_magic = LL_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sbi->ll_namelen = osfs->os_namelen;
	sbi->ll_mnt.mnt = current->fs->root.mnt;

	if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
	    !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
		LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
		sb->s_flags |= SB_POSIXACL;
		sbi->ll_flags |= LL_SBI_ACL;
	} else {
		LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
		sb->s_flags &= ~SB_POSIXACL;
		sbi->ll_flags &= ~LL_SBI_ACL;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
		sbi->ll_flags |= LL_SBI_64BIT_HASH;

	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
		sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
	else
		sbi->ll_md_brw_pages = 1;

	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
		sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;

	if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
		if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
			LCONSOLE_INFO(
				"%s: disabling xattr cache due to unknown maximum xattr size.\n",
				dt);
		} else {
			sbi->ll_flags |= LL_SBI_XATTR_CACHE;
			sbi->ll_xattr_cache_enabled = 1;
		}
	}

	obd = class_name2obd(dt);
	if (!obd) {
		CERROR("DT %s: not setup or attached\n", dt);
		err = -ENODEV;
		goto out_md_fid;
	}

	data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
				  OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK |
				  OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK |
				  OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
				  OBD_CONNECT_BULK_MBITS;

	if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
		/* OBD_CONNECT_CKSUM should always be set, even if checksums are
		 * disabled by default, because it can still be enabled on the
		 * fly via /sys. As a consequence, we still need to come to an
		 * agreement on the supported algorithms at connect time
		 */
		data->ocd_connect_flags |= OBD_CONNECT_CKSUM;

		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
			data->ocd_cksum_types = OBD_CKSUM_ADLER;
		else
			data->ocd_cksum_types = cksum_types_supported_client();
	}

	data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;

	/* always ping even if server suppress_pings */
	if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
		data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;

	CDEBUG(D_RPCTRACE,
	       "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
	       data->ocd_connect_flags,
	       data->ocd_version, data->ocd_grant);

	obd->obd_upcall.onu_owner = &sbi->ll_lco;
	obd->obd_upcall.onu_upcall = cl_ocd_update;

	data->ocd_brw_size = DT_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
			  NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x150,
				   "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   dt);
		goto out_md;
	} else if (err) {
		CERROR("%s: Cannot connect to %s: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
		goto out_md;
	}

	sbi->ll_dt_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_dt;
	}

	mutex_lock(&sbi->ll_lco.lco_lock);
	sbi->ll_lco.lco_flags = data->ocd_connect_flags;
	sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
	sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
	mutex_unlock(&sbi->ll_lco.lco_lock);

	fid_zero(&sbi->ll_root_fid);
	err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
	if (err) {
		CERROR("cannot mds_connect: rc = %d\n", err);
		goto out_lock_cn_cb;
	}
	if (!fid_is_sane(&sbi->ll_root_fid)) {
		CERROR("%s: Invalid root fid " DFID " during mount\n",
		       sbi->ll_md_exp->exp_obd->obd_name,
		       PFID(&sbi->ll_root_fid));
		err = -EINVAL;
		goto out_lock_cn_cb;
	}
	CDEBUG(D_SUPER, "rootfid " DFID "\n", PFID(&sbi->ll_root_fid));

	sb->s_op = &lustre_super_operations;
	sb->s_xattr = ll_xattr_handlers;
#if THREAD_SIZE >= 8192 /*b=17630*/
	sb->s_export_op = &lustre_export_operations;
#endif

	/* make root inode
	 * XXX: move this to after cbd setup?
	 */
	valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
	if (sbi->ll_flags & LL_SBI_ACL)
		valid |= OBD_MD_FLACL;

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data) {
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	op_data->op_fid1 = sbi->ll_root_fid;
	op_data->op_mode = 0;
	op_data->op_valid = valid;

	err = md_getattr(sbi->ll_md_exp, op_data, &request);
	kfree(op_data);
	if (err) {
		CERROR("%s: md_getattr failed for root: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_lock_cn_cb;
	}

	err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			       sbi->ll_md_exp, &lmd);
	if (err) {
		CERROR("failed to understand root inode md: rc = %d\n", err);
		ptlrpc_req_finished(request);
		goto out_lock_cn_cb;
	}

	LASSERT(fid_is_sane(&sbi->ll_root_fid));
	root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
					    sbi->ll_flags & LL_SBI_32BIT_API),
		       &lmd);
	md_free_lustre_md(sbi->ll_md_exp, &lmd);
	ptlrpc_req_finished(request);

	if (IS_ERR(root)) {
#ifdef CONFIG_FS_POSIX_ACL
		if (lmd.posix_acl) {
			posix_acl_release(lmd.posix_acl);
			lmd.posix_acl = NULL;
		}
#endif
		err = -EBADF;
		CERROR("lustre_lite: bad iget4 for root\n");
		goto out_root;
	}

	checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
				 KEY_CHECKSUM, sizeof(checksum), &checksum,
				 NULL);
	if (err) {
		CERROR("%s: Set checksum failed: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_root;
	}
	cl_sb_init(sb);

	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
				 KEY_CACHE_SET, sizeof(*sbi->ll_cache),
				 sbi->ll_cache, NULL);
	if (err) {
		CERROR("%s: Set cache_set failed: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_root;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		CERROR("%s: can't make root dentry\n",
		       ll_get_fsname(sb, NULL, 0));
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	sbi->ll_sdev_orig = sb->s_dev;

	/* We set sb->s_dev equal on all lustre clients in order to support
	 * NFS export clustering. NFSD requires that the FSID be the same
	 * on all clients.
	 */
	/* s_dev is also used in lt_compare() to compare two fs, but that is
	 * only a node-local comparison.
	 */
	uuid = obd_get_uuid(sbi->ll_md_exp);
	if (uuid) {
		sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
		get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
	}

	kfree(data);
	kfree(osfs);

	if (llite_root) {
		err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
		if (err < 0) {
			CERROR("%s: could not register mount in debugfs: rc = %d\n",
			       ll_get_fsname(sb, NULL, 0), err);
			err = 0;
		}
	}

	return err;
out_root:
	iput(root);
out_lock_cn_cb:
	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
out_dt:
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
out_md_fid:
	obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
out:
	kfree(data);
	kfree(osfs);
	return err;
}

int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	size = sizeof(*lmmsize);
	rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
			  KEY_MAX_EASIZE, &size, lmmsize);
	if (rc) {
		CERROR("%s: cannot get max LOV EA size: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, rc);
		return rc;
	}

	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
			  KEY_MAX_EASIZE, &size, lmmsize);
	if (rc)
		CERROR("Get max mdsize error rc %d\n", rc);

	return rc;
}

/**
 * Get the value of the default_easize parameter.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] sbi	superblock info for this filesystem
 * \param[out] lmmsize	pointer to storage location for value
 *
 * \retval 0		on success
 * \retval negative	negated errno on failure
 */
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
			  KEY_DEFAULT_EASIZE, &size, lmmsize);
	if (rc)
		CERROR("Get default mdsize error rc %d\n", rc);

	return rc;
}

/**
 * Set the default_easize parameter to the given value.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] sbi	superblock info for this filesystem
 * \param[in] lmmsize	the size to set
 *
 * \retval 0		on success
 * \retval negative	negated errno on failure
 */
int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
{
	if (lmmsize < sizeof(struct lov_mds_md) ||
	    lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
		return -EINVAL;

	return obd_set_info_async(NULL, sbi->ll_md_exp,
				  sizeof(KEY_DEFAULT_EASIZE),
				  KEY_DEFAULT_EASIZE,
				  sizeof(int), &lmmsize, NULL);
}

static void client_common_put_super(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	cl_sb_fini(sb);

	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;

	ldebugfs_unregister_mountpoint(sbi);

	obd_fid_fini(sbi->ll_md_exp->exp_obd);
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
}

void ll_kill_super(struct super_block *sb)
{
	struct ll_sb_info *sbi;

	/* sb not initialized? */
	if (!(sb->s_flags & SB_ACTIVE))
		return;

	sbi = ll_s2sbi(sb);
	/* we need to restore the s_dev that was changed for clustered NFS
	 * before put_super, because new kernels cache s_dev and changing
	 * sb->s_dev in put_super does not affect the real device removal
	 */
	if (sbi) {
		sb->s_dev = sbi->ll_sdev_orig;
		sbi->ll_umounting = 1;

		/* wait for running statahead threads to quit */
		while (atomic_read(&sbi->ll_sa_running) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
		}
	}
}

static inline int ll_set_opt(const char *opt, char *data, int fl)
{
	if (strncmp(opt, data, strlen(opt)) != 0)
		return 0;
	else
		return fl;
}

/* non-client-specific mount options are parsed in lmd_parse */
static int ll_options(char *options, int *flags)
{
	int tmp;
	char *s1 = options, *s2;

	if (!options)
		return 0;

	CDEBUG(D_CONFIG, "Parsing opts %s\n", options);

	while (*s1) {
		CDEBUG(D_SUPER, "next opt=%s\n", s1);
		tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noflock", s1,
				 LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("context", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("fscontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("defcontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("rootcontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}

		tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
				   s1);
		return -EINVAL;

next:
		/* Find next opt */
		s2 = strchr(s1, ',');
		if (!s2)
			break;
		s1 = s2 + 1;
	}
	return 0;
}

void ll_lli_init(struct ll_inode_info *lli)
{
	lli->lli_inode_magic = LLI_INODE_MAGIC;
	lli->lli_flags = 0;
	spin_lock_init(&lli->lli_lock);
	lli->lli_posix_acl = NULL;
	/* Do not set lli_fid, it has been initialized already. */
	fid_zero(&lli->lli_pfid);
	lli->lli_mds_read_och = NULL;
	lli->lli_mds_write_och = NULL;
	lli->lli_mds_exec_och = NULL;
	lli->lli_open_fd_read_count = 0;
	lli->lli_open_fd_write_count = 0;
	lli->lli_open_fd_exec_count = 0;
	mutex_init(&lli->lli_och_mutex);
	spin_lock_init(&lli->lli_agl_lock);
	spin_lock_init(&lli->lli_layout_lock);
	ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
	lli->lli_clob = NULL;

	init_rwsem(&lli->lli_xattrs_list_rwsem);
	mutex_init(&lli->lli_xattrs_enq_lock);

	LASSERT(lli->lli_vfs_inode.i_mode != 0);
	if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
		mutex_init(&lli->lli_readdir_mutex);
		lli->lli_opendir_key = NULL;
		lli->lli_sai = NULL;
		spin_lock_init(&lli->lli_sa_lock);
		lli->lli_opendir_pid = 0;
		lli->lli_sa_enabled = 0;
		lli->lli_def_stripe_offset = -1;
	} else {
		mutex_init(&lli->lli_size_mutex);
		lli->lli_symlink_name = NULL;
		init_rwsem(&lli->lli_trunc_sem);
		range_lock_tree_init(&lli->lli_write_tree);
		init_rwsem(&lli->lli_glimpse_sem);
		lli->lli_glimpse_time = 0;
		INIT_LIST_HEAD(&lli->lli_agl_list);
		lli->lli_agl_index = 0;
		lli->lli_async_rc = 0;
	}
	mutex_init(&lli->lli_layout_mutex);
}

int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
{
	struct lustre_profile *lprof = NULL;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi;
	char *dt = NULL, *md = NULL;
	char *profilenm = get_profile_name(sb);
	struct config_llog_instance *cfg;
	int err;
	static atomic_t ll_bdi_num = ATOMIC_INIT(0);

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);

	cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
	if (!cfg)
		return -ENOMEM;

	try_module_get(THIS_MODULE);

	/* client additional sb info */
	sbi = ll_init_sbi(sb);
	lsi->lsi_llsbi = sbi;
	if (!sbi) {
		module_put(THIS_MODULE);
		kfree(cfg);
		return -ENOMEM;
	}

	err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
	if (err)
		goto out_free;

	err = super_setup_bdi_name(sb, "lustre-%d",
				   atomic_inc_return(&ll_bdi_num));
	if (err)
		goto out_free;

	/* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
	sb->s_d_op = &ll_d_ops;

	/* Generate a string unique to this super, in case some joker tries
	 * to mount the same fs at two mount points.
	 * Use the address of the super itself.
	 */
	cfg->cfg_instance = sb;
	cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
	cfg->cfg_callback = class_config_llog_handler;
	/* set up client obds */
	err = lustre_process_log(sb, profilenm, cfg);
	if (err < 0)
		goto out_free;

	/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
	lprof = class_get_profile(profilenm);
	if (!lprof) {
		LCONSOLE_ERROR_MSG(0x156,
				   "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
				   profilenm);
		err = -EINVAL;
		goto out_free;
	}
	CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
	       lprof->lp_md, lprof->lp_dt);

	dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
	if (!dt) {
		err = -ENOMEM;
		goto out_free;
	}

	md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
	if (!md) {
		err = -ENOMEM;
		goto out_free;
	}

	/* connections, registrations, sb setup */
	err = client_common_fill_super(sb, md, dt, mnt);
	if (!err)
		sbi->ll_client_common_fill_super_succeeded = 1;

out_free:
	kfree(md);
	kfree(dt);
	if (lprof)
		class_put_profile(lprof);
	if (err)
		ll_put_super(sb);
	else if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Mounted %s\n", profilenm);

	kfree(cfg);
	return err;
} /* ll_fill_super */

void ll_put_super(struct super_block *sb)
{
	struct config_llog_instance cfg, params_cfg;
	struct obd_device *obd;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	int next, force = 1, rc = 0;
	long ccc_count;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);

	cfg.cfg_instance = sb;
	lustre_end_log(sb, profilenm, &cfg);

	params_cfg.cfg_instance = sb;
	lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);

	if (sbi->ll_md_exp) {
		obd = class_exp2obd(sbi->ll_md_exp);
		if (obd)
			force = obd->obd_force;
	}

	/* Wait for unstable pages to be committed to stable storage */
	if (!force) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

		rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
				  !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr),
				  &lwi);
	}

	ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
	if (!force && rc != -EINTR)
		LASSERTF(!ccc_count, "count: %li\n", ccc_count);

	/* We need to set force before the lov_disconnect in
	 * lustre_common_put_super, since l_d cleans up osc's as well.
	 */
	if (force) {
		next = 0;
		while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
						     &next)) != NULL) {
			obd->obd_force = force;
		}
	}

	if (sbi->ll_client_common_fill_super_succeeded) {
		/* Only if client_common_fill_super succeeded */
		client_common_put_super(sb);
	}

	next = 0;
	while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
		class_manual_cleanup(obd);

	if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");

	if (profilenm)
		class_del_profile(profilenm);

	ll_free_sbi(sb);
	lsi->lsi_llsbi = NULL;

	lustre_common_put_super(sb);

	cl_env_cache_purge(~0);

	module_put(THIS_MODULE);
} /* ll_put_super */

struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
{
	struct inode *inode = NULL;

	/* NOTE: we depend on atomic igrab() -bzzz */
	lock_res_and_lock(lock);
	if (lock->l_resource->lr_lvb_inode) {
		struct ll_inode_info *lli;

		lli = ll_i2info(lock->l_resource->lr_lvb_inode);
		if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
			inode = igrab(lock->l_resource->lr_lvb_inode);
		} else {
			inode = lock->l_resource->lr_lvb_inode;
			LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
					 D_WARNING, lock,
					 "lr_lvb_inode %p is bogus: magic %08x",
					 lock->l_resource->lr_lvb_inode,
					 lli->lli_inode_magic);
			inode = NULL;
		}
	}
	unlock_res_and_lock(lock);
	return inode;
}

void ll_dir_clear_lsm_md(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);

	LASSERT(S_ISDIR(inode->i_mode));

	if (lli->lli_lsm_md) {
		lmv_free_memmd(lli->lli_lsm_md);
		lli->lli_lsm_md = NULL;
	}
}

static struct inode *ll_iget_anon_dir(struct super_block *sb,
				      const struct lu_fid *fid,
				      struct lustre_md *md)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct mdt_body *body = md->body;
	struct inode *inode;
	ino_t ino;

	ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
	inode = iget_locked(sb, ino);
	if (!inode) {
		CERROR("%s: failed get simple inode " DFID ": rc = -ENOENT\n",
		       ll_get_fsname(sb, NULL, 0), PFID(fid));
		return ERR_PTR(-ENOENT);
	}

	if (inode->i_state & I_NEW) {
		struct ll_inode_info *lli = ll_i2info(inode);
		struct lmv_stripe_md *lsm = md->lmv;

		inode->i_mode = (inode->i_mode & ~S_IFMT) |
				(body->mbo_mode & S_IFMT);
		LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode " DFID "\n",
			 PFID(fid));

		LTIME_S(inode->i_mtime) = 0;
		LTIME_S(inode->i_atime) = 0;
		LTIME_S(inode->i_ctime) = 0;
		inode->i_rdev = 0;

		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
		lli->lli_fid = *fid;
		ll_lli_init(lli);

		LASSERT(lsm);
		/* master object FID */
		lli->lli_pfid = body->mbo_fid1;
		CDEBUG(D_INODE, "lli %p slave " DFID " master " DFID "\n",
		       lli, PFID(fid), PFID(&lli->lli_pfid));
		unlock_new_inode(inode);
	}

	return inode;
}

static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
{
	struct lmv_stripe_md *lsm = md->lmv;
	struct lu_fid *fid;
	int i;

	LASSERT(lsm);
	/*
	 * XXX sigh, this lsm_root initialization should be in
	 * LMV layer, but it needs ll_iget right now, so we
	 * put this here right now.
	 */
	for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
		fid = &lsm->lsm_md_oinfo[i].lmo_fid;
		LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
		/* Unfortunately ll_iget will call ll_update_inode,
		 * where the initialization of the slave inode is slightly
		 * different, so it resets lsm_md to NULL to avoid
		 * initializing lsm for the slave inode.
		 */
		/* For a migrating inode, master stripe and master object will
		 * be the same, so we only need to assign this inode
		 */
		if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
			lsm->lsm_md_oinfo[i].lmo_root = inode;
		else
			lsm->lsm_md_oinfo[i].lmo_root =
				ll_iget_anon_dir(inode->i_sb, fid, md);
		if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
			int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);

			lsm->lsm_md_oinfo[i].lmo_root = NULL;
			return rc;
		}
	}

	return 0;
}

static inline int lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
			    const struct lmv_stripe_md *lsm_md2)
{
	return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
	       lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
	       lsm_md1->lsm_md_master_mdt_index ==
	       lsm_md2->lsm_md_master_mdt_index &&
	       lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
	       lsm_md1->lsm_md_layout_version ==
	       lsm_md2->lsm_md_layout_version &&
	       !strcmp(lsm_md1->lsm_md_pool_name,
		       lsm_md2->lsm_md_pool_name);
}

static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lmv_stripe_md *lsm = md->lmv;
	int rc;

	LASSERT(S_ISDIR(inode->i_mode));
	CDEBUG(D_INODE, "update lsm %p of " DFID "\n", lli->lli_lsm_md,
	       PFID(ll_inode2fid(inode)));

	/* no striped information from request. */
	if (!lsm) {
		if (!lli->lli_lsm_md) {
			return 0;
		} else if (lli->lli_lsm_md->lsm_md_hash_type &
			   LMV_HASH_FLAG_MIGRATION) {
			/*
			 * migration is done, the temporary MIGRATE layout has
			 * been removed
			 */
			CDEBUG(D_INODE, DFID " finish migration.\n",
			       PFID(ll_inode2fid(inode)));
			lmv_free_memmd(lli->lli_lsm_md);
			lli->lli_lsm_md = NULL;
			return 0;
		} else {
			/*
			 * The lustre_md from req does not include stripeEA,
			 * see ll_md_setattr
			 */
			return 0;
		}
	}

	/* set the directory layout */
	if (!lli->lli_lsm_md) {
		struct cl_attr *attr;

		rc = ll_init_lsm_md(inode, md);
		if (rc)
			return rc;

		/*
		 * set lsm_md to NULL, so the following free lustre_md
		 * will not free this lsm
		 */
		md->lmv = NULL;
		lli->lli_lsm_md = lsm;

		attr = kzalloc(sizeof(*attr), GFP_NOFS);
		if (!attr)
			return -ENOMEM;

		/* validate the lsm */
		rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr,
				   ll_md_blocking_ast);
		if (rc) {
			kfree(attr);
			return rc;
		}

		if (md->body->mbo_valid & OBD_MD_FLNLINK)
			md->body->mbo_nlink = attr->cat_nlink;
		if (md->body->mbo_valid & OBD_MD_FLSIZE)
			md->body->mbo_size = attr->cat_size;
		if (md->body->mbo_valid & OBD_MD_FLATIME)
			md->body->mbo_atime = attr->cat_atime;
		if (md->body->mbo_valid & OBD_MD_FLCTIME)
			md->body->mbo_ctime = attr->cat_ctime;
		if (md->body->mbo_valid & OBD_MD_FLMTIME)
			md->body->mbo_mtime = attr->cat_mtime;

		kfree(attr);

		CDEBUG(D_INODE, "Set lsm %p magic %x to " DFID "\n", lsm,
		       lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
		return 0;
	}

	/* Compare the old and new stripe information */
	if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
		struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
		int idx;

		CERROR("%s: inode " DFID "(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
		       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
		       inode, lsm, old_lsm,
		       lsm->lsm_md_magic, old_lsm->lsm_md_magic,
		       lsm->lsm_md_stripe_count,
		       old_lsm->lsm_md_stripe_count,
		       lsm->lsm_md_master_mdt_index,
		       old_lsm->lsm_md_master_mdt_index,
		       lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
		       lsm->lsm_md_layout_version,
		       old_lsm->lsm_md_layout_version,
		       lsm->lsm_md_pool_name,
		       old_lsm->lsm_md_pool_name);

		for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
			CERROR("%s: sub FIDs in old lsm idx %d, old: " DFID "\n",
			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
			       PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
		}

		for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
			CERROR("%s: sub FIDs in new lsm idx %d, new: " DFID "\n",
			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
			       PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
		}

		return -EIO;
	}

	return 0;
}

void ll_clear_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

	if (S_ISDIR(inode->i_mode)) {
		/* these should have been cleared in ll_file_release */
		LASSERT(!lli->lli_opendir_key);
		LASSERT(!lli->lli_sai);
		LASSERT(lli->lli_opendir_pid == 0);
	}

	md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));

	LASSERT(!lli->lli_open_fd_write_count);
	LASSERT(!lli->lli_open_fd_read_count);
	LASSERT(!lli->lli_open_fd_exec_count);

	if (lli->lli_mds_write_och)
		ll_md_real_close(inode, FMODE_WRITE);
	if (lli->lli_mds_exec_och)
		ll_md_real_close(inode, FMODE_EXEC);
	if (lli->lli_mds_read_och)
		ll_md_real_close(inode, FMODE_READ);

	if (S_ISLNK(inode->i_mode)) {
		kfree(lli->lli_symlink_name);
		lli->lli_symlink_name = NULL;
	}

	ll_xattr_cache_destroy(inode);

#ifdef CONFIG_FS_POSIX_ACL
	forget_all_cached_acls(inode);
	if (lli->lli_posix_acl) {
		posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = NULL;
	}
#endif
	lli->lli_inode_magic = LLI_INODE_DEAD;

	if (S_ISDIR(inode->i_mode))
		ll_dir_clear_lsm_md(inode);
	if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
		LASSERT(list_empty(&lli->lli_agl_list));

	/*
	 * XXX This has to be done before lsm is freed below, because
	 * cl_object still uses inode lsm.
	 */
	cl_inode_fini(inode);
}

#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)

static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
{
	struct lustre_md md;
	struct inode *inode = d_inode(dentry);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *request = NULL;
	int rc, ia_valid;

	op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
	if (rc) {
		ptlrpc_req_finished(request);
		if (rc == -ENOENT) {
			clear_nlink(inode);
			/* Unlinked special device node? Or just a race?
			 * Pretend we did everything.
			 */
			if (!S_ISREG(inode->i_mode) &&
			    !S_ISDIR(inode->i_mode)) {
				ia_valid = op_data->op_attr.ia_valid;
				op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
				rc = simple_setattr(dentry, &op_data->op_attr);
				op_data->op_attr.ia_valid = ia_valid;
			}
		} else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
			CERROR("md_setattr fails: rc = %d\n", rc);
		}
		return rc;
	}

	rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc) {
		ptlrpc_req_finished(request);
		return rc;
	}

	ia_valid = op_data->op_attr.ia_valid;
	/* inode size will be set in cl_setattr_ost; we can't do it now
	 * since the dirty cache is not cleared yet.
	 */
	op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
	if (S_ISREG(inode->i_mode))
		inode_lock(inode);
	rc = simple_setattr(dentry, &op_data->op_attr);
	if (S_ISREG(inode->i_mode))
		inode_unlock(inode);
	op_data->op_attr.ia_valid = ia_valid;

	rc = ll_update_inode(inode, &md);
	ptlrpc_req_finished(request);

	return rc;
}

/* If this inode has objects allocated to it (lsm != NULL), then the OST
 * object(s) determine the file size and mtime. Otherwise, the MDS will
 * keep these values until such a time that objects are allocated for it.
 * We do the MDS operations first, as it is checking permissions for us.
 * We don't do the MDS RPC if there is nothing that we want to store there,
 * otherwise there is no harm in updating mtime/atime on the MDS if we are
 * going to do an RPC anyways.
 *
 * If we are doing a truncate, we will send the mtime and ctime updates
 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
 * at the same time.
 *
 * In case of HSM import, we only set attr on MDS.
 */
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
	struct inode *inode = d_inode(dentry);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data = NULL;
	int rc = 0;

	CDEBUG(D_VFSTRACE, "%s: setattr inode " DFID "(%p) from %llu to %llu, valid %x, hsm_import %d\n",
	       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
	       i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);

	if (attr->ia_valid & ATTR_SIZE) {
		/* Check new size against VFS/VM file size limit and rlimit */
		rc = inode_newsize_ok(inode, attr->ia_size);
		if (rc)
			return rc;

		/* The maximum Lustre file size is variable, based on the
		 * OST maximum object size and number of stripes. This
		 * needs another check in addition to the VFS check above.
		 */
		if (attr->ia_size > ll_file_maxbytes(inode)) {
			CDEBUG(D_INODE, "file " DFID " too large %llu > %llu\n",
			       PFID(&lli->lli_fid), attr->ia_size,
			       ll_file_maxbytes(inode));
			return -EFBIG;
		}

		attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	/* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */
	if (attr->ia_valid & TIMES_SET_FLAGS) {
		if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
		    !capable(CFS_CAP_FOWNER))
			return -EPERM;
	}

	/* We mark all of the fields "set" so MDS/OST does not re-set them */
	if (attr->ia_valid & ATTR_CTIME) {
		attr->ia_ctime = current_time(inode);
		attr->ia_valid |= ATTR_CTIME_SET;
	}
	if (!(attr->ia_valid & ATTR_ATIME_SET) &&
	    (attr->ia_valid & ATTR_ATIME)) {
		attr->ia_atime = current_time(inode);
		attr->ia_valid |= ATTR_ATIME_SET;
	}
	if (!(attr->ia_valid & ATTR_MTIME_SET) &&
	    (attr->ia_valid & ATTR_MTIME)) {
		attr->ia_mtime = current_time(inode);
		attr->ia_valid |= ATTR_MTIME_SET;
	}

	if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
		CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
		       LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
		       (s64)ktime_get_real_seconds());

	if (S_ISREG(inode->i_mode))
		inode_unlock(inode);

	/*
	 * We always do an MDS RPC, even if we're only changing the size;
	 * only the MDS knows whether truncate() should fail with -ETXTBUSY
	 */
	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
		/*
		 * If we are changing file size, file content is
		 * modified, flag it.
		 */
		attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
		op_data->op_bias |= MDS_DATA_MODIFIED;
		clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
	}

	op_data->op_attr = *attr;

	rc = ll_md_setattr(dentry, op_data);
	if (rc)
		goto out;

	if (!S_ISREG(inode->i_mode) || hsm_import) {
		rc = 0;
		goto out;
	}

	if (attr->ia_valid & (ATTR_SIZE |
			      ATTR_ATIME | ATTR_ATIME_SET |
			      ATTR_MTIME | ATTR_MTIME_SET)) {
		/* For truncate and utimes sending attributes to OSTs, setting
		 * mtime/atime to the past will be performed under PW [0:EOF]
		 * extent lock (new_size:EOF for truncate). It may seem
		 * excessive to send mtime/atime updates to OSTs when not
		 * setting times to past, but it is necessary due to possible
		 * time de-synchronization between MDT inode and OST objects
		 */
		rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
	}

	/*
	 * If the file was restored, it needs to set dirty flag.
	 *
	 * We've already sent MDS_DATA_MODIFIED flag in
	 * ll_md_setattr() for truncate. However, the MDT refuses to
	 * set the HS_DIRTY flag on released files, so we have to set
	 * it again if the file has been restored. Please check how
	 * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
	 *
	 * Please notice that if the file is not released, the previous
	 * MDS_DATA_MODIFIED has taken effect and usually
	 * LLIF_DATA_MODIFIED is not set(see vvp_io_setattr_fini()).
	 * This way we can save an RPC for common open + trunc
	 * operation.
	 */
	if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
		struct hsm_state_set hss = {
			.hss_valid = HSS_SETMASK,
			.hss_setmask = HS_DIRTY,
		};
		int rc2;

		rc2 = ll_hsm_state_set(inode, &hss);
		/*
		 * truncate and write can happen at the same time, so that
		 * the file can be set modified even though the file is not
		 * restored from released state, and ll_hsm_state_set() is
		 * not applicable for the file, and rc2 < 0 is normal in this
		 * case.
		 */
		if (rc2 < 0)
			CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
			       PFID(ll_inode2fid(inode)), rc2);
	}

out:
	if (op_data)
		ll_finish_md_op_data(op_data);

	if (S_ISREG(inode->i_mode)) {
		inode_lock(inode);
		if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
			inode_dio_wait(inode);
	}

	ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
			   LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);

	return rc;
}

int ll_setattr(struct dentry *de, struct iattr *attr)
{
	int mode = d_inode(de)->i_mode;

	if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
	    (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
		attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;

	if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
	     (ATTR_SIZE | ATTR_MODE)) &&
	    (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
	     (((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
	      !(attr->ia_mode & S_ISGID))))
		attr->ia_valid |= ATTR_FORCE;

	if ((attr->ia_valid & ATTR_MODE) &&
	    (mode & S_ISUID) &&
	    !(attr->ia_mode & S_ISUID) &&
	    !(attr->ia_valid & ATTR_KILL_SUID))
		attr->ia_valid |= ATTR_KILL_SUID;

	if ((attr->ia_valid & ATTR_MODE) &&
	    ((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
	    !(attr->ia_mode & S_ISGID) &&
	    !(attr->ia_valid & ATTR_KILL_SGID))
		attr->ia_valid |= ATTR_KILL_SGID;

	return ll_setattr_raw(de, attr, false);
}

int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
		       __u64 max_age, __u32 flags)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_statfs obd_osfs;
	int rc;

	rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
	if (rc) {
		CERROR("md_statfs fails: rc = %d\n", rc);
		return rc;
	}

	osfs->os_type = sb->s_magic;

	CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
	       osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
	       osfs->os_files);

	if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
		flags |= OBD_STATFS_NODELAY;

	rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
	if (rc) {
		CERROR("obd_statfs fails: rc = %d\n", rc);
		return rc;
	}

	CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
	       obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
	       obd_osfs.os_files);

	osfs->os_bsize = obd_osfs.os_bsize;
	osfs->os_blocks = obd_osfs.os_blocks;
	osfs->os_bfree = obd_osfs.os_bfree;
	osfs->os_bavail = obd_osfs.os_bavail;

	/* If we don't have as many objects free on the OST as inodes
	 * on the MDS, we reduce the total number of inodes to
	 * compensate, so that the "inodes in use" number is correct.
	 */
	if (obd_osfs.os_ffree < osfs->os_ffree) {
		osfs->os_files = (osfs->os_files - osfs->os_ffree) +
				 obd_osfs.os_ffree;
		osfs->os_ffree = obd_osfs.os_ffree;
	}

	return rc;
}

int ll_statfs(struct dentry *de, struct kstatfs *sfs)
{
	struct super_block *sb = de->d_sb;
	struct obd_statfs osfs;
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
	ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);

	/* Some amount of caching on the client is allowed */
	rc = ll_statfs_internal(sb, &osfs,
				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
				0);
	if (rc)
		return rc;

	statfs_unpack(sfs, &osfs);

	/* We need to downshift for all 32-bit kernels, because we can't
	 * tell if the kernel is being called via sys_statfs64() or not.
	 * Stop before overflowing f_bsize - in which case it is better
	 * to just risk EOVERFLOW if caller is using old sys_statfs().
	 */
	if (sizeof(long) < 8) {
		while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
			sfs->f_bsize <<= 1;

			osfs.os_blocks >>= 1;
			osfs.os_bfree >>= 1;
			osfs.os_bavail >>= 1;
		}
	}

	sfs->f_blocks = osfs.os_blocks;
	sfs->f_bfree = osfs.os_bfree;
	sfs->f_bavail = osfs.os_bavail;
	sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
	return 0;
}

void ll_inode_size_lock(struct inode *inode)
{
	struct ll_inode_info *lli;

	LASSERT(!S_ISDIR(inode->i_mode));

	lli = ll_i2info(inode);
	mutex_lock(&lli->lli_size_mutex);
}

void ll_inode_size_unlock(struct inode *inode)
{
	struct ll_inode_info *lli;

	lli = ll_i2info(inode);
	mutex_unlock(&lli->lli_size_mutex);
}

int ll_update_inode(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body = md->body;
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	if (body->mbo_valid & OBD_MD_FLEASIZE)
		cl_file_inode_init(inode, md);

	if (S_ISDIR(inode->i_mode)) {
		int rc;

		rc = ll_update_lsm_md(inode, md);
		if (rc)
			return rc;
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (body->mbo_valid & OBD_MD_FLACL) {
		spin_lock(&lli->lli_lock);
		if (lli->lli_posix_acl)
			posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = md->posix_acl;
		spin_unlock(&lli->lli_lock);
	}
#endif
	inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
					sbi->ll_flags & LL_SBI_32BIT_API);
	inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);

	if (body->mbo_valid & OBD_MD_FLATIME) {
		if (body->mbo_atime > LTIME_S(inode->i_atime))
			LTIME_S(inode->i_atime) = body->mbo_atime;
		lli->lli_atime = body->mbo_atime;
	}
	if (body->mbo_valid & OBD_MD_FLMTIME) {
		if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
			CDEBUG(D_INODE,
			       "setting ino %lu mtime from %lu to %llu\n",
			       inode->i_ino, LTIME_S(inode->i_mtime),
			       body->mbo_mtime);
			LTIME_S(inode->i_mtime) = body->mbo_mtime;
		}
		lli->lli_mtime = body->mbo_mtime;
	}
	if (body->mbo_valid & OBD_MD_FLCTIME) {
		if (body->mbo_ctime > LTIME_S(inode->i_ctime))
			LTIME_S(inode->i_ctime) = body->mbo_ctime;
		lli->lli_ctime = body->mbo_ctime;
	}
	if (body->mbo_valid & OBD_MD_FLMODE)
		inode->i_mode = (inode->i_mode & S_IFMT) |
				(body->mbo_mode & ~S_IFMT);
	if (body->mbo_valid & OBD_MD_FLTYPE)
		inode->i_mode = (inode->i_mode & ~S_IFMT) |
				(body->mbo_mode & S_IFMT);
	LASSERT(inode->i_mode != 0);
	if (S_ISREG(inode->i_mode))
		inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
				       LL_MAX_BLKSIZE_BITS);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;
	if (body->mbo_valid & OBD_MD_FLUID)
		inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
	if (body->mbo_valid & OBD_MD_FLGID)
		inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
	if (body->mbo_valid & OBD_MD_FLFLAGS)
		inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags);
	if (body->mbo_valid & OBD_MD_FLNLINK)
		set_nlink(inode, body->mbo_nlink);
	if (body->mbo_valid & OBD_MD_FLRDEV)
		inode->i_rdev = old_decode_dev(body->mbo_rdev);
1788 inode->i_rdev = old_decode_dev(body->mbo_rdev);
1789
1790 if (body->mbo_valid & OBD_MD_FLID) {
d7e09d03
PT
1791 /* FID shouldn't be changed! */
1792 if (fid_is_sane(&lli->lli_fid)) {
2e1b5b8b 1793 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
1ada25dc 1794 "Trying to change FID " DFID " to the " DFID ", inode " DFID "(%p)\n",
2e1b5b8b 1795 PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
97a075cd 1796 PFID(ll_inode2fid(inode)), inode);
da5ecb4d 1797 } else {
2e1b5b8b 1798 lli->lli_fid = body->mbo_fid1;
da5ecb4d 1799 }
d7e09d03
PT
1800 }
1801
1802 LASSERT(fid_seq(&lli->lli_fid) != 0);
1803
2e1b5b8b 1804 if (body->mbo_valid & OBD_MD_FLSIZE) {
0cd99931 1805 i_size_write(inode, body->mbo_size);
d7e09d03 1806
0cd99931
JH
1807 CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
1808 PFID(ll_inode2fid(inode)),
1809 (unsigned long long)body->mbo_size);
d7e09d03 1810
2e1b5b8b
JH
1811 if (body->mbo_valid & OBD_MD_FLBLOCKS)
1812 inode->i_blocks = body->mbo_blocks;
d7e09d03
PT
1813 }
1814
2e1b5b8b
JH
1815 if (body->mbo_valid & OBD_MD_TSTATE) {
1816 if (body->mbo_t_state & MS_RESTORE)
219c0c45 1817 set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
5ea17d6c 1818 }
c3397e7e 1819
1820 return 0;
d7e09d03
PT
1821}
1822
c3397e7e 1823int ll_read_inode2(struct inode *inode, void *opaque)
d7e09d03
PT
1824{
1825 struct lustre_md *md = opaque;
1826 struct ll_inode_info *lli = ll_i2info(inode);
c3397e7e 1827 int rc;
d7e09d03 1828
1ada25dc 1829 CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
d7e09d03
PT
1830 PFID(&lli->lli_fid), inode);
1831
d7e09d03
PT
1832 /* Core attributes from the MDS first. This is a new inode, and
1833 * the VFS doesn't zero times in the core inode so we have to do
1834 * it ourselves. They will be overwritten by either MDS or OST
c0894c6c
OD
1835 * attributes - we just need to make sure they aren't newer.
1836 */
d7e09d03
PT
1837 LTIME_S(inode->i_mtime) = 0;
1838 LTIME_S(inode->i_atime) = 0;
1839 LTIME_S(inode->i_ctime) = 0;
1840 inode->i_rdev = 0;
c3397e7e 1841 rc = ll_update_inode(inode, md);
1842 if (rc)
1843 return rc;
d7e09d03
PT
1844
1845 /* OIDEBUG(inode); */
1846
d7e09d03
PT
1847 if (S_ISREG(inode->i_mode)) {
1848 struct ll_sb_info *sbi = ll_i2sbi(inode);
cf29a7b6 1849
d7e09d03
PT
1850 inode->i_op = &ll_file_inode_operations;
1851 inode->i_fop = sbi->ll_fop;
1852 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
d7e09d03
PT
1853 } else if (S_ISDIR(inode->i_mode)) {
1854 inode->i_op = &ll_dir_inode_operations;
1855 inode->i_fop = &ll_dir_operations;
d7e09d03
PT
1856 } else if (S_ISLNK(inode->i_mode)) {
1857 inode->i_op = &ll_fast_symlink_inode_operations;
d7e09d03
PT
1858 } else {
1859 inode->i_op = &ll_special_inode_operations;
1860
1861 init_special_inode(inode, inode->i_mode,
1862 inode->i_rdev);
d7e09d03 1863 }
c3397e7e 1864
1865 return 0;
d7e09d03
PT
1866}
1867
1868void ll_delete_inode(struct inode *inode)
1869{
1929c433 1870 struct ll_inode_info *lli = ll_i2info(inode);
d7e09d03 1871
6e16818b 1872 if (S_ISREG(inode->i_mode) && lli->lli_clob)
d7e09d03 1873 /* discard all dirty pages before truncating them, required by
c0894c6c
OD
1874 * osc_extent implementation at LU-1030.
1875 */
65fb55d1 1876 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
7510c5ca 1877 CL_FSYNC_LOCAL, 1);
d7e09d03 1878
91b0abe3 1879 truncate_inode_pages_final(&inode->i_data);
d7e09d03 1880
7510c5ca
YS
1881 LASSERTF(!inode->i_data.nrpages,
1882 "inode=" DFID "(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
1883 PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages);
d7e09d03
PT
1884
1885 ll_clear_inode(inode);
1886 clear_inode(inode);
d7e09d03
PT
1887}
1888
1889int ll_iocontrol(struct inode *inode, struct file *file,
1890 unsigned int cmd, unsigned long arg)
1891{
1892 struct ll_sb_info *sbi = ll_i2sbi(inode);
1893 struct ptlrpc_request *req = NULL;
1894 int rc, flags = 0;
d7e09d03 1895
a58a38ac 1896 switch (cmd) {
d7e09d03
PT
1897 case FSFILT_IOC_GETFLAGS: {
1898 struct mdt_body *body;
1899 struct md_op_data *op_data;
1900
1901 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1902 0, 0, LUSTRE_OPC_ANY,
1903 NULL);
1904 if (IS_ERR(op_data))
0a3bdb00 1905 return PTR_ERR(op_data);
d7e09d03
PT
1906
1907 op_data->op_valid = OBD_MD_FLFLAGS;
1908 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1909 ll_finish_md_op_data(op_data);
1910 if (rc) {
1ada25dc 1911 CERROR("%s: failure inode " DFID ": rc = %d\n",
97a075cd
JN
1912 sbi->ll_md_exp->exp_obd->obd_name,
1913 PFID(ll_inode2fid(inode)), rc);
0a3bdb00 1914 return -abs(rc);
d7e09d03
PT
1915 }
1916
1917 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1918
2e1b5b8b 1919 flags = body->mbo_flags;
d7e09d03
PT
1920
1921 ptlrpc_req_finished(req);
1922
7ac5db21 1923 return put_user(flags, (int __user *)arg);
d7e09d03
PT
1924 }
1925 case FSFILT_IOC_SETFLAGS: {
d7e09d03 1926 struct md_op_data *op_data;
933eb397
JH
1927 struct cl_object *obj;
1928 struct iattr *attr;
d7e09d03 1929
7ac5db21 1930 if (get_user(flags, (int __user *)arg))
0a3bdb00 1931 return -EFAULT;
d7e09d03
PT
1932
1933 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1934 LUSTRE_OPC_ANY, NULL);
1935 if (IS_ERR(op_data))
0a3bdb00 1936 return PTR_ERR(op_data);
d7e09d03 1937
bb41292b 1938 op_data->op_attr_flags = flags;
d7e09d03 1939 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
f28f1a45 1940 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
d7e09d03
PT
1941 ll_finish_md_op_data(op_data);
1942 ptlrpc_req_finished(req);
1943 if (rc)
0a3bdb00 1944 return rc;
d7e09d03
PT
1945
1946 inode->i_flags = ll_ext_to_inode_flags(flags);
1947
933eb397
JH
1948 obj = ll_i2info(inode)->lli_clob;
1949 if (!obj)
0a3bdb00 1950 return 0;
d7e09d03 1951
933eb397
JH
1952 attr = kzalloc(sizeof(*attr), GFP_NOFS);
1953 if (!attr)
0a3bdb00 1954 return -ENOMEM;
d7e09d03 1955
933eb397
JH
1956 attr->ia_valid = ATTR_ATTR_FLAG;
1957 rc = cl_setattr_ost(obj, attr, flags);
1958 kfree(attr);
0a3bdb00 1959 return rc;
d7e09d03
PT
1960 }
1961 default:
0a3bdb00 1962 return -ENOSYS;
d7e09d03
PT
1963 }
1964
0a3bdb00 1965 return 0;
d7e09d03
PT
1966}
1967
1968int ll_flush_ctx(struct inode *inode)
1969{
1970 struct ll_sb_info *sbi = ll_i2sbi(inode);
1971
4b1a25f0 1972 CDEBUG(D_SEC, "flush context for user %d\n",
e15ba45d 1973 from_kuid(&init_user_ns, current_uid()));
d7e09d03
PT
1974
1975 obd_set_info_async(NULL, sbi->ll_md_exp,
1976 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1977 0, NULL, NULL);
1978 obd_set_info_async(NULL, sbi->ll_dt_exp,
1979 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1980 0, NULL, NULL);
1981 return 0;
1982}
1983
1984/* umount -f client means force down, don't save state */
1985void ll_umount_begin(struct super_block *sb)
1986{
1987 struct ll_sb_info *sbi = ll_s2sbi(sb);
1988 struct obd_device *obd;
1989 struct obd_ioctl_data *ioc_data;
3f4f7824
RD
1990 wait_queue_head_t waitq;
1991 struct l_wait_info lwi;
d7e09d03
PT
1992
1993 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
1994 sb->s_count, atomic_read(&sb->s_active));
1995
1996 obd = class_exp2obd(sbi->ll_md_exp);
6e16818b 1997 if (!obd) {
55f5a824 1998 CERROR("Invalid MDC connection handle %#llx\n",
d7e09d03 1999 sbi->ll_md_exp->exp_handle.h_cookie);
d7e09d03
PT
2000 return;
2001 }
2002 obd->obd_force = 1;
2003
2004 obd = class_exp2obd(sbi->ll_dt_exp);
6e16818b 2005 if (!obd) {
55f5a824 2006 CERROR("Invalid LOV connection handle %#llx\n",
d7e09d03 2007 sbi->ll_dt_exp->exp_handle.h_cookie);
d7e09d03
PT
2008 return;
2009 }
2010 obd->obd_force = 1;
2011
496a51bd 2012 ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
d7e09d03
PT
2013 if (ioc_data) {
2014 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
ec83e611 2015 sizeof(*ioc_data), ioc_data, NULL);
d7e09d03
PT
2016
2017 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
ec83e611 2018 sizeof(*ioc_data), ioc_data, NULL);
d7e09d03 2019
97903a26 2020 kfree(ioc_data);
d7e09d03
PT
2021 }
2022
d7e09d03 2023 /* Really, we'd like to wait until there are no requests outstanding,
3f4f7824
RD
2024 * and then continue. For now, we just periodically checking for vfs
2025 * to decrement mnt_cnt and hope to finish it within 10sec.
d7e09d03 2026 */
3f4f7824
RD
2027 init_waitqueue_head(&waitq);
2028 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(10),
2029 cfs_time_seconds(1), NULL, NULL);
2030 l_wait_event(waitq, may_umount(sbi->ll_mnt.mnt), &lwi);
2031
d7e09d03 2032 schedule();
d7e09d03
PT
2033}
2034
2035int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2036{
2037 struct ll_sb_info *sbi = ll_s2sbi(sb);
2038 char *profilenm = get_profile_name(sb);
2039 int err;
2040 __u32 read_only;
2041
1751e8a6
LT
2042 if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
2043 read_only = *flags & SB_RDONLY;
d7e09d03
PT
2044 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2045 sizeof(KEY_READ_ONLY),
2046 KEY_READ_ONLY, sizeof(read_only),
2047 &read_only, NULL);
2048 if (err) {
2049 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2050 profilenm, read_only ?
2051 "read-only" : "read-write", err);
2052 return err;
2053 }
2054
2055 if (read_only)
1751e8a6 2056 sb->s_flags |= SB_RDONLY;
d7e09d03 2057 else
1751e8a6 2058 sb->s_flags &= ~SB_RDONLY;
d7e09d03
PT
2059
2060 if (sbi->ll_flags & LL_SBI_VERBOSE)
2061 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2062 read_only ? "read-only" : "read-write");
2063 }
2064 return 0;
2065}
2066
44ecac68
FY
2067/**
2068 * Cleanup the open handle that is cached on MDT-side.
2069 *
2070 * For open case, the client side open handling thread may hit error
2071 * after the MDT grant the open. Under such case, the client should
2072 * send close RPC to the MDT as cleanup; otherwise, the open handle
2073 * on the MDT will be leaked there until the client umount or evicted.
2074 *
2075 * In further, if someone unlinked the file, because the open handle
2076 * holds the reference on such file/object, then it will block the
2077 * subsequent threads that want to locate such object via FID.
2078 *
2079 * \param[in] sb super block for this file-system
2080 * \param[in] open_req pointer to the original open request
2081 */
2082void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2083{
2084 struct mdt_body *body;
2085 struct md_op_data *op_data;
2086 struct ptlrpc_request *close_req = NULL;
2087 struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
2088
2089 body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
af13af52 2090 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
83ea341d 2091 if (!op_data)
44ecac68 2092 return;
44ecac68 2093
2e1b5b8b 2094 op_data->op_fid1 = body->mbo_fid1;
2e1b5b8b 2095 op_data->op_handle = body->mbo_handle;
44ecac68
FY
2096 op_data->op_mod_time = get_seconds();
2097 md_close(exp, op_data, NULL, &close_req);
2098 ptlrpc_req_finished(close_req);
2099 ll_finish_md_op_data(op_data);
2100}
2101
d7e09d03
PT
2102int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2103 struct super_block *sb, struct lookup_intent *it)
2104{
2105 struct ll_sb_info *sbi = NULL;
24af3e16 2106 struct lustre_md md = { NULL };
d7e09d03 2107 int rc;
d7e09d03
PT
2108
2109 LASSERT(*inode || sb);
2110 sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2111 rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2112 sbi->ll_md_exp, &md);
2113 if (rc)
44ecac68 2114 goto cleanup;
d7e09d03
PT
2115
2116 if (*inode) {
c3397e7e 2117 rc = ll_update_inode(*inode, &md);
2118 if (rc)
2119 goto out;
d7e09d03 2120 } else {
6e16818b 2121 LASSERT(sb);
d7e09d03
PT
2122
2123 /*
2124 * At this point server returns to client's same fid as client
2125 * generated for creating. So using ->fid1 is okay here.
2126 */
2e1b5b8b 2127 if (!fid_is_sane(&md.body->mbo_fid1)) {
c681528a
SC
2128 CERROR("%s: Fid is insane " DFID "\n",
2129 ll_get_fsname(sb, NULL, 0),
2e1b5b8b 2130 PFID(&md.body->mbo_fid1));
c681528a
SC
2131 rc = -EINVAL;
2132 goto out;
2133 }
d7e09d03 2134
2e1b5b8b 2135 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
c1e2699d 2136 sbi->ll_flags & LL_SBI_32BIT_API),
d7e09d03 2137 &md);
c3397e7e 2138 if (IS_ERR(*inode)) {
d7e09d03
PT
2139#ifdef CONFIG_FS_POSIX_ACL
2140 if (md.posix_acl) {
2141 posix_acl_release(md.posix_acl);
2142 md.posix_acl = NULL;
2143 }
2144#endif
020ecc6f 2145 rc = -ENOMEM;
d7e09d03 2146 CERROR("new_inode -fatal: rc %d\n", rc);
34e1f2bb 2147 goto out;
d7e09d03
PT
2148 }
2149 }
2150
2151 /* Handling piggyback layout lock.
2152 * Layout lock can be piggybacked by getattr and open request.
2153 * The lsm can be applied to inode only if it comes with a layout lock
2154 * otherwise correct layout may be overwritten, for example:
2155 * 1. proc1: mdt returns a lsm but not granting layout
2156 * 2. layout was changed by another client
2157 * 3. proc2: refresh layout and layout lock granted
c0894c6c
OD
2158 * 4. proc1: to apply a stale layout
2159 */
e476f2e5 2160 if (it && it->it_lock_mode != 0) {
d7e09d03
PT
2161 struct lustre_handle lockh;
2162 struct ldlm_lock *lock;
2163
e476f2e5 2164 lockh.cookie = it->it_lock_handle;
d7e09d03 2165 lock = ldlm_handle2lock(&lockh);
6e16818b 2166 LASSERT(lock);
d7e09d03
PT
2167 if (ldlm_has_layout(lock)) {
2168 struct cl_object_conf conf;
2169
2170 memset(&conf, 0, sizeof(conf));
2171 conf.coc_opc = OBJECT_CONF_SET;
2172 conf.coc_inode = *inode;
2173 conf.coc_lock = lock;
55051039 2174 conf.u.coc_layout = md.layout;
d7e09d03
PT
2175 (void)ll_layout_conf(*inode, &conf);
2176 }
2177 LDLM_LOCK_PUT(lock);
2178 }
2179
2180out:
d7e09d03 2181 md_free_lustre_md(sbi->ll_md_exp, &md);
44ecac68
FY
2182cleanup:
2183 if (rc != 0 && it && it->it_op & IT_OPEN)
2184 ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
2185
0a3bdb00 2186 return rc;
d7e09d03
PT
2187}
2188
4c6243ec 2189int ll_obd_statfs(struct inode *inode, void __user *arg)
d7e09d03
PT
2190{
2191 struct ll_sb_info *sbi = NULL;
2192 struct obd_export *exp;
2193 char *buf = NULL;
2194 struct obd_ioctl_data *data = NULL;
2195 __u32 type;
d7e09d03
PT
2196 int len = 0, rc;
2197
c650ba73
TR
2198 if (!inode) {
2199 rc = -EINVAL;
2200 goto out_statfs;
2201 }
2202
2203 sbi = ll_i2sbi(inode);
2204 if (!sbi) {
34e1f2bb
JL
2205 rc = -EINVAL;
2206 goto out_statfs;
2207 }
d7e09d03
PT
2208
2209 rc = obd_ioctl_getdata(&buf, &len, arg);
2210 if (rc)
34e1f2bb 2211 goto out_statfs;
d7e09d03 2212
bdbb0512 2213 data = (void *)buf;
d7e09d03 2214 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
34e1f2bb
JL
2215 !data->ioc_pbuf1 || !data->ioc_pbuf2) {
2216 rc = -EINVAL;
2217 goto out_statfs;
2218 }
d7e09d03
PT
2219
2220 if (data->ioc_inllen1 != sizeof(__u32) ||
2221 data->ioc_inllen2 != sizeof(__u32) ||
2222 data->ioc_plen1 != sizeof(struct obd_statfs) ||
34e1f2bb
JL
2223 data->ioc_plen2 != sizeof(struct obd_uuid)) {
2224 rc = -EINVAL;
2225 goto out_statfs;
2226 }
d7e09d03
PT
2227
2228 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
da5ecb4d 2229 if (type & LL_STATFS_LMV) {
d7e09d03 2230 exp = sbi->ll_md_exp;
da5ecb4d 2231 } else if (type & LL_STATFS_LOV) {
d7e09d03 2232 exp = sbi->ll_dt_exp;
da5ecb4d 2233 } else {
34e1f2bb
JL
2234 rc = -ENODEV;
2235 goto out_statfs;
2236 }
d7e09d03 2237
44164fc9 2238 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
d7e09d03 2239 if (rc)
34e1f2bb 2240 goto out_statfs;
d7e09d03 2241out_statfs:
bb44b987 2242 kvfree(buf);
d7e09d03
PT
2243 return rc;
2244}
2245
2246int ll_process_config(struct lustre_cfg *lcfg)
2247{
2248 char *ptr;
2249 void *sb;
2250 struct lprocfs_static_vars lvars;
2251 unsigned long x;
2252 int rc = 0;
2253
2254 lprocfs_llite_init_vars(&lvars);
2255
2256 /* The instance name contains the sb: lustre-client-aacfe000 */
2257 ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2258 if (!ptr || !*(++ptr))
2259 return -EINVAL;
692f2b6c 2260 rc = kstrtoul(ptr, 16, &x);
2261 if (rc != 0)
d7e09d03
PT
2262 return -EINVAL;
2263 sb = (void *)x;
2264 /* This better be a real Lustre superblock! */
9f11748c
AG
2265 LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic ==
2266 LMD_MAGIC);
d7e09d03
PT
2267
2268 /* Note we have not called client_common_fill_super yet, so
c0894c6c
OD
2269 * proc fns must be able to handle that!
2270 */
d7e09d03
PT
2271 rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2272 lcfg, sb);
2273 if (rc > 0)
2274 rc = 0;
fbe7c6c7 2275 return rc;
d7e09d03
PT
2276}
2277
2278/* this function prepares md_op_data hint for passing ot down to MD stack. */
aff9d8e8 2279struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
e15ba45d 2280 struct inode *i1, struct inode *i2,
930f60e6
DE
2281 const char *name, size_t namelen,
2282 u32 mode, __u32 opc, void *data)
d7e09d03 2283{
d097d67b
JH
2284 if (!name) {
2285 /* Do not reuse namelen for something else. */
2286 if (namelen)
2287 return ERR_PTR(-EINVAL);
2288 } else {
2289 if (namelen > ll_i2sbi(i1)->ll_namelen)
2290 return ERR_PTR(-ENAMETOOLONG);
2291
2292 if (!lu_name_is_valid_2(name, namelen))
2293 return ERR_PTR(-EINVAL);
2294 }
d7e09d03 2295
6e16818b 2296 if (!op_data)
496a51bd 2297 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
d7e09d03 2298
6e16818b 2299 if (!op_data)
d7e09d03
PT
2300 return ERR_PTR(-ENOMEM);
2301
2302 ll_i2gids(op_data->op_suppgids, i1, i2);
2303 op_data->op_fid1 = *ll_inode2fid(i1);
d81e9009 2304 op_data->op_default_stripe_offset = -1;
2305 if (S_ISDIR(i1->i_mode)) {
2de35386 2306 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
d0d3caae 2307 if (opc == LUSTRE_OPC_MKDIR)
2308 op_data->op_default_stripe_offset =
2309 ll_i2info(i1)->lli_def_stripe_offset;
d81e9009 2310 }
d7e09d03 2311
1c12cf63 2312 if (i2) {
d7e09d03 2313 op_data->op_fid2 = *ll_inode2fid(i2);
1c12cf63 2314 if (S_ISDIR(i2->i_mode))
2de35386 2315 op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
1c12cf63 2316 } else {
d7e09d03 2317 fid_zero(&op_data->op_fid2);
1c12cf63 2318 }
2319
2320 if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2321 op_data->op_cli_flags |= CLI_HASH64;
2322
2323 if (ll_need_32bit_api(ll_i2sbi(i1)))
2324 op_data->op_cli_flags |= CLI_API32;
d7e09d03
PT
2325
2326 op_data->op_name = name;
2327 op_data->op_namelen = namelen;
2328 op_data->op_mode = mode;
14e3f92a 2329 op_data->op_mod_time = ktime_get_real_seconds();
4b1a25f0
PT
2330 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2331 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
d7e09d03 2332 op_data->op_cap = cfs_curproc_cap_pack();
6e16818b 2333 if ((opc == LUSTRE_OPC_CREATE) && name &&
1d62e09c 2334 filename_is_volatile(name, namelen, &op_data->op_mds))
d7e09d03 2335 op_data->op_bias |= MDS_CREATE_VOLATILE;
1d62e09c 2336 else
2337 op_data->op_mds = 0;
d7e09d03
PT
2338 op_data->op_data = data;
2339
d7e09d03
PT
2340 return op_data;
2341}
2342
2343void ll_finish_md_op_data(struct md_op_data *op_data)
2344{
97903a26 2345 kfree(op_data);
d7e09d03
PT
2346}
2347
2348int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2349{
2350 struct ll_sb_info *sbi;
2351
6e16818b 2352 LASSERT(seq && dentry);
d7e09d03
PT
2353 sbi = ll_s2sbi(dentry->d_sb);
2354
2355 if (sbi->ll_flags & LL_SBI_NOLCK)
2356 seq_puts(seq, ",nolock");
2357
2358 if (sbi->ll_flags & LL_SBI_FLOCK)
2359 seq_puts(seq, ",flock");
2360
2361 if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2362 seq_puts(seq, ",localflock");
2363
2364 if (sbi->ll_flags & LL_SBI_USER_XATTR)
2365 seq_puts(seq, ",user_xattr");
2366
2367 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2368 seq_puts(seq, ",lazystatfs");
2369
2370 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2371 seq_puts(seq, ",user_fid2path");
2372
bfb9944c
WW
2373 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
2374 seq_puts(seq, ",always_ping");
2375
0a3bdb00 2376 return 0;
d7e09d03
PT
2377}
2378
2379/**
2380 * Get obd name by cmd, and copy out to user space
2381 */
2382int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2383{
2384 struct ll_sb_info *sbi = ll_i2sbi(inode);
2385 struct obd_device *obd;
d7e09d03
PT
2386
2387 if (cmd == OBD_IOC_GETDTNAME)
2388 obd = class_exp2obd(sbi->ll_dt_exp);
2389 else if (cmd == OBD_IOC_GETMDNAME)
2390 obd = class_exp2obd(sbi->ll_md_exp);
2391 else
0a3bdb00 2392 return -EINVAL;
d7e09d03
PT
2393
2394 if (!obd)
0a3bdb00 2395 return -ENOENT;
d7e09d03 2396
7ac5db21
OD
2397 if (copy_to_user((void __user *)arg, obd->obd_name,
2398 strlen(obd->obd_name) + 1))
0a3bdb00 2399 return -EFAULT;
d7e09d03 2400
0a3bdb00 2401 return 0;
d7e09d03
PT
2402}
2403
2404/**
2405 * Get lustre file system name by \a sbi. If \a buf is provided(non-NULL), the
2406 * fsname will be returned in this buffer; otherwise, a static buffer will be
2407 * used to store the fsname and returned to caller.
2408 */
2409char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2410{
2411 static char fsname_static[MTI_NAME_MAXLEN];
2412 struct lustre_sb_info *lsi = s2lsi(sb);
2413 char *ptr;
2414 int len;
2415
6e16818b 2416 if (!buf) {
d7e09d03
PT
2417 /* this means the caller wants to use static buffer
2418 * and it doesn't care about race. Usually this is
c0894c6c
OD
2419 * in error reporting path
2420 */
d7e09d03
PT
2421 buf = fsname_static;
2422 buflen = sizeof(fsname_static);
2423 }
2424
2425 len = strlen(lsi->lsi_lmd->lmd_profile);
2426 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2427 if (ptr && (strcmp(ptr, "-client") == 0))
2428 len -= 7;
2429
2430 if (unlikely(len >= buflen))
2431 len = buflen - 1;
2432 strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2433 buf[len] = '\0';
2434
2435 return buf;
2436}
2437
d7e09d03
PT
2438void ll_dirty_page_discard_warn(struct page *page, int ioret)
2439{
2440 char *buf, *path = NULL;
2441 struct dentry *dentry = NULL;
8c7b0e1a 2442 struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
d7e09d03
PT
2443
2444 /* this can be called inside spin lock so use GFP_ATOMIC. */
2445 buf = (char *)__get_free_page(GFP_ATOMIC);
6e16818b 2446 if (buf) {
d7e09d03 2447 dentry = d_find_alias(page->mapping->host);
6e16818b 2448 if (dentry)
1ad581eb 2449 path = dentry_path_raw(dentry, buf, PAGE_SIZE);
d7e09d03
PT
2450 }
2451
73b89907 2452 CDEBUG(D_WARNING,
2d00bd17
JP
2453 "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
2454 ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
73b89907 2455 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
8c7b0e1a 2456 PFID(&obj->vob_header.coh_lu.loh_fid),
73b89907 2457 (path && !IS_ERR(path)) ? path : "", ioret);
d7e09d03 2458
6e16818b 2459 if (dentry)
d7e09d03
PT
2460 dput(dentry);
2461
6e16818b 2462 if (buf)
d7e09d03
PT
2463 free_page((unsigned long)buf);
2464}
c948390f 2465
dbf789ce
JX
2466ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
2467 struct lov_user_md **kbuf)
2468{
2469 struct lov_user_md lum;
2470 ssize_t lum_size;
2471
2472 if (copy_from_user(&lum, md, sizeof(lum))) {
2473 lum_size = -EFAULT;
2474 goto no_kbuf;
2475 }
2476
2477 lum_size = ll_lov_user_md_size(&lum);
2478 if (lum_size < 0)
2479 goto no_kbuf;
2480
2481 *kbuf = kzalloc(lum_size, GFP_NOFS);
2482 if (!*kbuf) {
2483 lum_size = -ENOMEM;
2484 goto no_kbuf;
2485 }
2486
2487 if (copy_from_user(*kbuf, md, lum_size) != 0) {
2488 kfree(*kbuf);
2489 *kbuf = NULL;
2490 lum_size = -EFAULT;
2491 }
2492no_kbuf:
2493 return lum_size;
2494}
2495
c948390f
GP
2496/*
2497 * Compute llite root squash state after a change of root squash
2498 * configuration setting or add/remove of a lnet nid
2499 */
2500void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
2501{
2502 struct root_squash_info *squash = &sbi->ll_squash;
06ef1af8 2503 struct lnet_process_id id;
c948390f
GP
2504 bool matched;
2505 int i;
2506
2507 /* Update norootsquash flag */
2508 down_write(&squash->rsi_sem);
2509 if (list_empty(&squash->rsi_nosquash_nids)) {
2510 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2511 } else {
2512 /*
2513 * Do not apply root squash as soon as one of our NIDs is
2514 * in the nosquash_nids list
2515 */
2516 matched = false;
2517 i = 0;
2518
2519 while (LNetGetId(i++, &id) != -ENOENT) {
2520 if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
2521 continue;
2522 if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
2523 matched = true;
2524 break;
2525 }
2526 }
2527 if (matched)
2528 sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
2529 else
2530 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2531 }
2532 up_write(&squash->rsi_sem);
2533}
a6d879fd
HD
2534
2535/**
2536 * Parse linkea content to extract information about a given hardlink
2537 *
2538 * \param[in] ldata - Initialized linkea data
2539 * \param[in] linkno - Link identifier
2540 * \param[out] parent_fid - The entry's parent FID
2541 * \param[in] size - Entry name destination buffer
2542 *
2543 * \retval 0 on success
2544 * \retval Appropriate negative error code on failure
2545 */
2546static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
2547 struct lu_fid *parent_fid, struct lu_name *ln)
2548{
2549 unsigned int idx;
2550 int rc;
2551
7da5e890 2552 rc = linkea_init_with_rec(ldata);
a6d879fd
HD
2553 if (rc < 0)
2554 return rc;
2555
2556 if (linkno >= ldata->ld_leh->leh_reccount)
2557 /* beyond last link */
2558 return -ENODATA;
2559
2560 linkea_first_entry(ldata);
2561 for (idx = 0; ldata->ld_lee; idx++) {
2562 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
2563 parent_fid);
2564 if (idx == linkno)
2565 break;
2566
2567 linkea_next_entry(ldata);
2568 }
2569
2570 if (idx < linkno)
2571 return -ENODATA;
2572
2573 return 0;
2574}
2575
2576/**
2577 * Get parent FID and name of an identified link. Operation is performed for
2578 * a given link number, letting the caller iterate over linkno to list one or
2579 * all links of an entry.
2580 *
2581 * \param[in] file - File descriptor against which to perform the operation
2582 * \param[in,out] arg - User-filled structure containing the linkno to operate
9f11748c
AG
2583 * on and the available size. It is eventually filled
2584 * with the requested information or left untouched on
2585 * error
a6d879fd
HD
2586 *
2587 * \retval - 0 on success
2588 * \retval - Appropriate negative error code on failure
2589 */
2590int ll_getparent(struct file *file, struct getparent __user *arg)
2591{
2592 struct inode *inode = file_inode(file);
2593 struct linkea_data *ldata;
2594 struct lu_fid parent_fid;
2595 struct lu_buf buf = {
2596 .lb_buf = NULL,
2597 .lb_len = 0
2598 };
2599 struct lu_name ln;
2600 u32 name_size;
2601 u32 linkno;
2602 int rc;
2603
2604 if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
2605 !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
2606 return -EPERM;
2607
2608 if (get_user(name_size, &arg->gp_name_size))
2609 return -EFAULT;
2610
2611 if (get_user(linkno, &arg->gp_linkno))
2612 return -EFAULT;
2613
2614 if (name_size > PATH_MAX)
2615 return -EINVAL;
2616
2617 ldata = kzalloc(sizeof(*ldata), GFP_NOFS);
2618 if (!ldata)
2619 return -ENOMEM;
2620
2621 rc = linkea_data_new(ldata, &buf);
2622 if (rc < 0)
2623 goto ldata_free;
2624
2625 rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
2626 buf.lb_len, OBD_MD_FLXATTR);
2627 if (rc < 0)
2628 goto lb_free;
2629
2630 rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
2631 if (rc < 0)
2632 goto lb_free;
2633
2634 if (ln.ln_namelen >= name_size) {
2635 rc = -EOVERFLOW;
2636 goto lb_free;
2637 }
2638
2639 if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid))) {
2640 rc = -EFAULT;
2641 goto lb_free;
2642 }
2643
2644 if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen)) {
2645 rc = -EFAULT;
2646 goto lb_free;
2647 }
2648
2649 if (put_user('\0', arg->gp_name + ln.ln_namelen)) {
2650 rc = -EFAULT;
2651 goto lb_free;
2652 }
2653
2654lb_free:
2655 lu_buf_free(&buf);
2656ldata_free:
2657 kfree(ldata);
2658 return rc;
2659}