/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/llite_lib.c
 *
 * Lustre Light Super operations
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/types.h>
#include <linux/mm.h>

#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_dlm.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_disk.h"
#include "../include/lustre_param.h"
#include "../include/lustre_log.h"
#include "../include/cl_object.h"
#include "../include/obd_cksum.h"
#include "llite_internal.h"

struct kmem_cache *ll_file_data_slab;
struct dentry *llite_root;
struct kset *llite_kset;

#ifndef log2
#define log2(n) ffz(~(n))
#endif
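
/*
 * Example (illustrative only): for a power-of-two value such as an
 * osfs->os_bsize of 4096, ~4096 has its first zero bit at position 12,
 * so log2(4096) == 12; this is how sb->s_blocksize_bits is derived from
 * osfs->os_bsize further below.  The macro assumes a power-of-two input.
 */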

static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = NULL;
	unsigned long pages;
	unsigned long lru_page_max;
	struct sysinfo si;
	class_uuid_t uuid;
	int i;

	sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
	if (!sbi)
		return NULL;

	spin_lock_init(&sbi->ll_lock);
	mutex_init(&sbi->ll_lco.lco_lock);
	spin_lock_init(&sbi->ll_pp_extent_lock);
	spin_lock_init(&sbi->ll_process_lock);
	sbi->ll_rw_stats_on = 0;

	si_meminfo(&si);
	pages = si.totalram - si.totalhigh;
	lru_page_max = pages / 2;

	sbi->ll_cache = cl_cache_init(lru_page_max);
	if (!sbi->ll_cache) {
		kfree(sbi);
		return NULL;
	}

	sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
						    SBI_DEFAULT_READAHEAD_MAX);
	sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
	sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
					SBI_DEFAULT_READAHEAD_WHOLE_MAX;

	ll_generate_random_uuid(uuid);
	class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
	CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);

	sbi->ll_flags |= LL_SBI_VERBOSE;
	sbi->ll_flags |= LL_SBI_CHECKSUM;

	sbi->ll_flags |= LL_SBI_LRU_RESIZE;
	sbi->ll_flags |= LL_SBI_LAZYSTATFS;

	for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_r_hist.oh_lock);
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_w_hist.oh_lock);
	}

	/* metadata statahead is enabled by default */
	sbi->ll_sa_max = LL_SA_RPC_DEF;
	atomic_set(&sbi->ll_sa_total, 0);
	atomic_set(&sbi->ll_sa_wrong, 0);
	atomic_set(&sbi->ll_sa_running, 0);
	atomic_set(&sbi->ll_agl_total, 0);
	sbi->ll_flags |= LL_SBI_AGL_ENABLED;

	/* root squash */
	sbi->ll_squash.rsi_uid = 0;
	sbi->ll_squash.rsi_gid = 0;
	INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
	init_rwsem(&sbi->ll_squash.rsi_sem);

	sbi->ll_sb = sb;

	return sbi;
}

static void ll_free_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	if (sbi->ll_cache) {
		if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
			cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
		cl_cache_decref(sbi->ll_cache);
		sbi->ll_cache = NULL;
	}

	kfree(sbi);
}

static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
				    struct vfsmount *mnt)
{
	struct inode *root = NULL;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_statfs *osfs = NULL;
	struct ptlrpc_request *request = NULL;
	struct obd_connect_data *data = NULL;
	struct obd_uuid *uuid;
	struct md_op_data *op_data;
	struct lustre_md lmd;
	u64 valid;
	int size, err, checksum;

	obd = class_name2obd(md);
	if (!obd) {
		CERROR("MD %s: not setup or attached\n", md);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;

	osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
	if (!osfs) {
		kfree(data);
		return -ENOMEM;
	}

	/* indicate the features supported by this client */
	data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
				  OBD_CONNECT_ATTRFID |
				  OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK |
				  OBD_CONNECT_PINGLESS |
				  OBD_CONNECT_MAX_EASIZE |
				  OBD_CONNECT_FLOCK_DEAD |
				  OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
				  OBD_CONNECT_OPEN_BY_FID |
				  OBD_CONNECT_DIR_STRIPE |
				  OBD_CONNECT_BULK_MBITS;

	if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
		data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
#ifdef CONFIG_FS_POSIX_ACL
	data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
#endif

	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
		/* flag mdc connection as lightweight, only used for test
		 * purpose, use with care
		 */
		data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;

	data->ocd_ibits_known = MDS_INODELOCK_FULL;
	data->ocd_version = LUSTRE_VERSION_CODE;

	if (sb->s_flags & MS_RDONLY)
		data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
	if (sbi->ll_flags & LL_SBI_USER_XATTR)
		data->ocd_connect_flags |= OBD_CONNECT_XATTR;

	if (sbi->ll_flags & LL_SBI_FLOCK)
		sbi->ll_fop = &ll_file_operations_flock;
	else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
		sbi->ll_fop = &ll_file_operations;
	else
		sbi->ll_fop = &ll_file_operations_noflock;

	/* real client */
	data->ocd_connect_flags |= OBD_CONNECT_REAL;

	/* always ping even if server suppress_pings */
	if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
		data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;

	data->ocd_brw_size = MD_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
			  data, NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   md);
		goto out;
	} else if (err) {
		CERROR("cannot connect to %s: rc = %d\n", md, err);
		goto out;
	}

	sbi->ll_md_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md;
	}

	/* For mount, we only need fs info from MDT0, and also in DNE, it
	 * can make sure the client can be mounted as long as MDT0 is
	 * available
	 */
	err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
			 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
			 OBD_STATFS_FOR_MDT0);
	if (err)
		goto out_md_fid;

	/* This needs to be after statfs to ensure connect has finished.
	 * Note that "data" does NOT contain the valid connect reply.
	 * If connecting to a 1.8 server there will be no LMV device, so
	 * we can access the MDC export directly and exp_connect_flags will
	 * be non-zero, but if accessing an upgraded 2.1 server it will
	 * have the correct flags filled in.
	 * XXX: fill in the LMV exp_connect_flags from MDC(s).
	 */
	valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
	if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
	    valid != CLIENT_CONNECT_MDT_REQD) {
		char *buf;

		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out_md_fid;
		}
		obd_connect_flags2str(buf, PAGE_SIZE,
				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
		LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
				   sbi->ll_md_exp->exp_obd->obd_name, buf);
		kfree(buf);
		err = -EPROTO;
		goto out_md_fid;
	}

	size = sizeof(*data);
	err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
			   KEY_CONN_DATA, &size, data);
	if (err) {
		CERROR("%s: Get connect data failed: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md_fid;
	}

	LASSERT(osfs->os_bsize);
	sb->s_blocksize = osfs->os_bsize;
	sb->s_blocksize_bits = log2(osfs->os_bsize);
	sb->s_magic = LL_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sbi->ll_namelen = osfs->os_namelen;

	if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
	    !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
		LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
		sb->s_flags |= MS_POSIXACL;
		sbi->ll_flags |= LL_SBI_ACL;
	} else {
		LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
		sb->s_flags &= ~MS_POSIXACL;
		sbi->ll_flags &= ~LL_SBI_ACL;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
		sbi->ll_flags |= LL_SBI_64BIT_HASH;

	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
		sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
	else
		sbi->ll_md_brw_pages = 1;

	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
		sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;

	if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
		if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
			LCONSOLE_INFO("%s: disabling xattr cache due to unknown maximum xattr size.\n",
				      dt);
		} else {
			sbi->ll_flags |= LL_SBI_XATTR_CACHE;
			sbi->ll_xattr_cache_enabled = 1;
		}
	}

	obd = class_name2obd(dt);
	if (!obd) {
		CERROR("DT %s: not setup or attached\n", dt);
		err = -ENODEV;
		goto out_md_fid;
	}

	data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
				  OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK |
				  OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK |
				  OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
				  OBD_CONNECT_BULK_MBITS;

	if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
		/* OBD_CONNECT_CKSUM should always be set, even if checksums are
		 * disabled by default, because it can still be enabled on the
		 * fly via /sys. As a consequence, we still need to come to an
		 * agreement on the supported algorithms at connect time
		 */
		data->ocd_connect_flags |= OBD_CONNECT_CKSUM;

		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
			data->ocd_cksum_types = OBD_CKSUM_ADLER;
		else
			data->ocd_cksum_types = cksum_types_supported_client();
	}

	data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;

	/* always ping even if server suppress_pings */
	if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
		data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;

	CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
	       data->ocd_connect_flags,
	       data->ocd_version, data->ocd_grant);

	obd->obd_upcall.onu_owner = &sbi->ll_lco;
	obd->obd_upcall.onu_upcall = cl_ocd_update;

	data->ocd_brw_size = DT_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
			  NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   dt);
		goto out_md;
	} else if (err) {
		CERROR("%s: Cannot connect to %s: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
		goto out_md;
	}

	sbi->ll_dt_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_dt;
	}

	mutex_lock(&sbi->ll_lco.lco_lock);
	sbi->ll_lco.lco_flags = data->ocd_connect_flags;
	sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
	sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
	mutex_unlock(&sbi->ll_lco.lco_lock);

	fid_zero(&sbi->ll_root_fid);
	err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
	if (err) {
		CERROR("cannot mds_connect: rc = %d\n", err);
		goto out_lock_cn_cb;
	}
	if (!fid_is_sane(&sbi->ll_root_fid)) {
		CERROR("%s: Invalid root fid "DFID" during mount\n",
		       sbi->ll_md_exp->exp_obd->obd_name,
		       PFID(&sbi->ll_root_fid));
		err = -EINVAL;
		goto out_lock_cn_cb;
	}
	CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));

	sb->s_op = &lustre_super_operations;
	sb->s_xattr = ll_xattr_handlers;
#if THREAD_SIZE >= 8192 /*b=17630*/
	sb->s_export_op = &lustre_export_operations;
#endif

	/* make root inode
	 * XXX: move this to after cbd setup?
	 */
	valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
	if (sbi->ll_flags & LL_SBI_ACL)
		valid |= OBD_MD_FLACL;

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data) {
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	op_data->op_fid1 = sbi->ll_root_fid;
	op_data->op_mode = 0;
	op_data->op_valid = valid;

	err = md_getattr(sbi->ll_md_exp, op_data, &request);
	kfree(op_data);
	if (err) {
		CERROR("%s: md_getattr failed for root: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_lock_cn_cb;
	}

	err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			       sbi->ll_md_exp, &lmd);
	if (err) {
		CERROR("failed to understand root inode md: rc = %d\n", err);
		ptlrpc_req_finished(request);
		goto out_lock_cn_cb;
	}

	LASSERT(fid_is_sane(&sbi->ll_root_fid));
	root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
					    sbi->ll_flags & LL_SBI_32BIT_API),
		       &lmd);
	md_free_lustre_md(sbi->ll_md_exp, &lmd);
	ptlrpc_req_finished(request);

	if (IS_ERR(root)) {
#ifdef CONFIG_FS_POSIX_ACL
		if (lmd.posix_acl) {
			posix_acl_release(lmd.posix_acl);
			lmd.posix_acl = NULL;
		}
#endif
		err = -EBADF;
		CERROR("lustre_lite: bad iget4 for root\n");
		goto out_root;
	}

	checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
				 KEY_CHECKSUM, sizeof(checksum), &checksum,
				 NULL);
	if (err) {
		CERROR("%s: Set checksum failed: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_root;
	}
	cl_sb_init(sb);

	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
				 KEY_CACHE_SET, sizeof(*sbi->ll_cache),
				 sbi->ll_cache, NULL);
	if (err) {
		CERROR("%s: Set cache_set failed: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_root;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		CERROR("%s: can't make root dentry\n",
		       ll_get_fsname(sb, NULL, 0));
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	sbi->ll_sdev_orig = sb->s_dev;

	/* We set sb->s_dev equal on all lustre clients in order to support
	 * NFS export clustering. NFSD requires that the FSID be the same
	 * on all clients.
	 */
	/* s_dev is also used in lt_compare() to compare two fs, but that is
	 * only a node-local comparison.
	 */
	uuid = obd_get_uuid(sbi->ll_md_exp);
	if (uuid) {
		sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
		get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
	}

	kfree(data);
	kfree(osfs);

	if (llite_root) {
		err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
		if (err < 0) {
			CERROR("%s: could not register mount in debugfs: rc = %d\n",
			       ll_get_fsname(sb, NULL, 0), err);
			err = 0;
		}
	}

	return err;
out_root:
	iput(root);
out_lock_cn_cb:
	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
out_dt:
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
out_md_fid:
	obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
out:
	kfree(data);
	kfree(osfs);
	return err;
}

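/**
 * Get the maximum LOV EA size.
 *
 * The value is obtained via KEY_MAX_EASIZE, first from the data export
 * (ll_dt_exp) and then from the metadata export (ll_md_exp).
 *
 * \param[in]  sbi	superblock info for this filesystem
 * \param[out] lmmsize	pointer to storage location for the value
 *
 * \retval 0 on success
 * \retval negative negated errno on failure
 */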
int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	size = sizeof(*lmmsize);
	rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
			  KEY_MAX_EASIZE, &size, lmmsize);
	if (rc) {
		CERROR("%s: cannot get max LOV EA size: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, rc);
		return rc;
	}

	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
			  KEY_MAX_EASIZE, &size, lmmsize);
	if (rc)
		CERROR("Get max mdsize error rc %d\n", rc);

	return rc;
}

/**
 * Get the value of the default_easize parameter.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in]  sbi	superblock info for this filesystem
 * \param[out] lmmsize	pointer to storage location for value
 *
 * \retval 0 on success
 * \retval negative negated errno on failure
 */
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
			  KEY_DEFAULT_EASIZE, &size, lmmsize);
	if (rc)
		CERROR("Get default mdsize error rc %d\n", rc);

	return rc;
}

/**
 * Set the default_easize parameter to the given value.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] sbi	superblock info for this filesystem
 * \param[in] lmmsize	the size to set
 *
 * \retval 0 on success
 * \retval negative negated errno on failure
 */
int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
{
	if (lmmsize < sizeof(struct lov_mds_md) ||
	    lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
		return -EINVAL;

	return obd_set_info_async(NULL, sbi->ll_md_exp,
				  sizeof(KEY_DEFAULT_EASIZE),
				  KEY_DEFAULT_EASIZE,
				  sizeof(int), &lmmsize, NULL);
}

static void client_common_put_super(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	cl_sb_fini(sb);

	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;

	ldebugfs_unregister_mountpoint(sbi);

	obd_fid_fini(sbi->ll_md_exp->exp_obd);
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
}

void ll_kill_super(struct super_block *sb)
{
	struct ll_sb_info *sbi;

	/* sb not initialized? */
	if (!(sb->s_flags & MS_ACTIVE))
		return;

	sbi = ll_s2sbi(sb);
	/* We need to restore the s_dev that was changed for clustered NFS
	 * before put_super, because newer kernels cache s_dev and changing
	 * sb->s_dev in put_super does not affect the real device removal.
	 */
	if (sbi) {
		sb->s_dev = sbi->ll_sdev_orig;
		sbi->ll_umounting = 1;

		/* wait for running statahead threads to quit */
		while (atomic_read(&sbi->ll_sa_running) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
		}
	}
}

static inline int ll_set_opt(const char *opt, char *data, int fl)
{
	if (strncmp(opt, data, strlen(opt)) != 0)
		return 0;
	else
		return fl;
}

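/*
 * Example: a mount option string such as "flock,lazystatfs,noverbose"
 * is walked one comma-separated token at a time below; each recognized
 * token sets or clears the corresponding LL_SBI_* bit in *flags, and an
 * unrecognized token fails the mount with -EINVAL.
 */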
/* non-client-specific mount options are parsed in lmd_parse */
static int ll_options(char *options, int *flags)
{
	int tmp;
	char *s1 = options, *s2;

	if (!options)
		return 0;

	CDEBUG(D_CONFIG, "Parsing opts %s\n", options);

	while (*s1) {
		CDEBUG(D_SUPER, "next opt=%s\n", s1);
		tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noflock", s1,
				 LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("context", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("fscontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("defcontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("rootcontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}

		tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
				   s1);
		return -EINVAL;

next:
		/* Find next opt */
		s2 = strchr(s1, ',');
		if (!s2)
			break;
		s1 = s2 + 1;
	}
	return 0;
}

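/**
 * Initialize a freshly allocated ll_inode_info.
 *
 * Fields common to all inode types are set up first; directories then
 * get their readdir/statahead state initialized, while all other inode
 * types get the size/truncate/glimpse state instead.  lli_fid is left
 * alone because it has already been initialized by the caller.
 */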
void ll_lli_init(struct ll_inode_info *lli)
{
	lli->lli_inode_magic = LLI_INODE_MAGIC;
	lli->lli_flags = 0;
	spin_lock_init(&lli->lli_lock);
	lli->lli_posix_acl = NULL;
	/* Do not set lli_fid, it has been initialized already. */
	fid_zero(&lli->lli_pfid);
	lli->lli_mds_read_och = NULL;
	lli->lli_mds_write_och = NULL;
	lli->lli_mds_exec_och = NULL;
	lli->lli_open_fd_read_count = 0;
	lli->lli_open_fd_write_count = 0;
	lli->lli_open_fd_exec_count = 0;
	mutex_init(&lli->lli_och_mutex);
	spin_lock_init(&lli->lli_agl_lock);
	spin_lock_init(&lli->lli_layout_lock);
	ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
	lli->lli_clob = NULL;

	init_rwsem(&lli->lli_xattrs_list_rwsem);
	mutex_init(&lli->lli_xattrs_enq_lock);

	LASSERT(lli->lli_vfs_inode.i_mode != 0);
	if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
		mutex_init(&lli->lli_readdir_mutex);
		lli->lli_opendir_key = NULL;
		lli->lli_sai = NULL;
		spin_lock_init(&lli->lli_sa_lock);
		lli->lli_opendir_pid = 0;
		lli->lli_sa_enabled = 0;
		lli->lli_def_stripe_offset = -1;
	} else {
		mutex_init(&lli->lli_size_mutex);
		lli->lli_symlink_name = NULL;
		init_rwsem(&lli->lli_trunc_sem);
		range_lock_tree_init(&lli->lli_write_tree);
		init_rwsem(&lli->lli_glimpse_sem);
		lli->lli_glimpse_time = 0;
		INIT_LIST_HEAD(&lli->lli_agl_list);
		lli->lli_agl_index = 0;
		lli->lli_async_rc = 0;
	}
	mutex_init(&lli->lli_layout_mutex);
}

static inline int ll_bdi_register(struct backing_dev_info *bdi)
{
	static atomic_t ll_bdi_num = ATOMIC_INIT(0);

	bdi->name = "lustre";
	return bdi_register(bdi, NULL, "lustre-%d",
			    atomic_inc_return(&ll_bdi_num));
}

int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
{
	struct lustre_profile *lprof = NULL;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi;
	char *dt = NULL, *md = NULL;
	char *profilenm = get_profile_name(sb);
	struct config_llog_instance *cfg;
	int err;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);

	cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
	if (!cfg)
		return -ENOMEM;

	try_module_get(THIS_MODULE);

	/* client additional sb info */
	sbi = ll_init_sbi(sb);
	lsi->lsi_llsbi = sbi;
	if (!sbi) {
		module_put(THIS_MODULE);
		kfree(cfg);
		return -ENOMEM;
	}

	err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
	if (err)
		goto out_free;

	err = bdi_init(&lsi->lsi_bdi);
	if (err)
		goto out_free;
	lsi->lsi_flags |= LSI_BDI_INITIALIZED;
	lsi->lsi_bdi.capabilities = 0;
	err = ll_bdi_register(&lsi->lsi_bdi);
	if (err)
		goto out_free;

	sb->s_bdi = &lsi->lsi_bdi;
	/* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
	sb->s_d_op = &ll_d_ops;

	/* Generate a string unique to this super, in case some joker tries
	 * to mount the same fs at two mount points.
	 * Use the address of the super itself.
	 */
	cfg->cfg_instance = sb;
	cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
	cfg->cfg_callback = class_config_llog_handler;
	/* set up client obds */
	err = lustre_process_log(sb, profilenm, cfg);
	if (err < 0)
		goto out_free;

	/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
	lprof = class_get_profile(profilenm);
	if (!lprof) {
		LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
				   profilenm);
		err = -EINVAL;
		goto out_free;
	}
	CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
	       lprof->lp_md, lprof->lp_dt);

	dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
	if (!dt) {
		err = -ENOMEM;
		goto out_free;
	}

	md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
	if (!md) {
		err = -ENOMEM;
		goto out_free;
	}

	/* connections, registrations, sb setup */
	err = client_common_fill_super(sb, md, dt, mnt);
	if (!err)
		sbi->ll_client_common_fill_super_succeeded = 1;

out_free:
	kfree(md);
	kfree(dt);
	if (lprof)
		class_put_profile(lprof);
	if (err)
		ll_put_super(sb);
	else if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Mounted %s\n", profilenm);

	kfree(cfg);
	return err;
} /* ll_fill_super */

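/**
 * Tear down the client mount: end the config logs, wait for unstable
 * pages when the unmount is not forced, disconnect the MD and DT
 * exports set up in client_common_fill_super(), clean up the remaining
 * obd devices and free the ll_sb_info.
 */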
void ll_put_super(struct super_block *sb)
{
	struct config_llog_instance cfg, params_cfg;
	struct obd_device *obd;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	int next, force = 1, rc = 0;
	long ccc_count;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);

	cfg.cfg_instance = sb;
	lustre_end_log(sb, profilenm, &cfg);

	params_cfg.cfg_instance = sb;
	lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);

	if (sbi->ll_md_exp) {
		obd = class_exp2obd(sbi->ll_md_exp);
		if (obd)
			force = obd->obd_force;
	}

	/* Wait for unstable pages to be committed to stable storage */
	if (!force) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

		rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
				  !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr),
				  &lwi);
	}

	ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
	if (!force && rc != -EINTR)
		LASSERTF(!ccc_count, "count: %li\n", ccc_count);

	/* We need to set force before the lov_disconnect in
	 * lustre_common_put_super, since l_d cleans up osc's as well.
	 */
	if (force) {
		next = 0;
		while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
						     &next)) != NULL) {
			obd->obd_force = force;
		}
	}

	if (sbi->ll_client_common_fill_super_succeeded) {
		/* Only if client_common_fill_super succeeded */
		client_common_put_super(sb);
	}

	next = 0;
	while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
		class_manual_cleanup(obd);

	if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");

	if (profilenm)
		class_del_profile(profilenm);

	if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
		bdi_destroy(&lsi->lsi_bdi);
		lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
	}

	ll_free_sbi(sb);
	lsi->lsi_llsbi = NULL;

	lustre_common_put_super(sb);

	cl_env_cache_purge(~0);

	module_put(THIS_MODULE);
} /* client_put_super */

struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
{
	struct inode *inode = NULL;

	/* NOTE: we depend on atomic igrab() -bzzz */
	lock_res_and_lock(lock);
	if (lock->l_resource->lr_lvb_inode) {
		struct ll_inode_info *lli;

		lli = ll_i2info(lock->l_resource->lr_lvb_inode);
		if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
			inode = igrab(lock->l_resource->lr_lvb_inode);
		} else {
			inode = lock->l_resource->lr_lvb_inode;
			LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
					 D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
					 lock->l_resource->lr_lvb_inode,
					 lli->lli_inode_magic);
			inode = NULL;
		}
	}
	unlock_res_and_lock(lock);
	return inode;
}

void ll_dir_clear_lsm_md(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);

	LASSERT(S_ISDIR(inode->i_mode));

	if (lli->lli_lsm_md) {
		lmv_free_memmd(lli->lli_lsm_md);
		lli->lli_lsm_md = NULL;
	}
}

static struct inode *ll_iget_anon_dir(struct super_block *sb,
				      const struct lu_fid *fid,
				      struct lustre_md *md)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct mdt_body *body = md->body;
	struct inode *inode;
	ino_t ino;

	ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
	inode = iget_locked(sb, ino);
	if (!inode) {
		CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
		       ll_get_fsname(sb, NULL, 0), PFID(fid));
		return ERR_PTR(-ENOENT);
	}

	if (inode->i_state & I_NEW) {
		struct ll_inode_info *lli = ll_i2info(inode);
		struct lmv_stripe_md *lsm = md->lmv;

		inode->i_mode = (inode->i_mode & ~S_IFMT) |
				(body->mbo_mode & S_IFMT);
		LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
			 PFID(fid));

		LTIME_S(inode->i_mtime) = 0;
		LTIME_S(inode->i_atime) = 0;
		LTIME_S(inode->i_ctime) = 0;
		inode->i_rdev = 0;

		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
		lli->lli_fid = *fid;
		ll_lli_init(lli);

		LASSERT(lsm);
		/* master object FID */
		lli->lli_pfid = body->mbo_fid1;
		CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
		       lli, PFID(fid), PFID(&lli->lli_pfid));
		unlock_new_inode(inode);
	}

	return inode;
}

static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
{
	struct lmv_stripe_md *lsm = md->lmv;
	struct lu_fid *fid;
	int i;

	LASSERT(lsm);
	/*
	 * XXX sigh, this lsm_root initialization should be in
	 * LMV layer, but it needs ll_iget right now, so we
	 * put this here right now.
	 */
	for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
		fid = &lsm->lsm_md_oinfo[i].lmo_fid;
		LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
		/* Unfortunately ll_iget will call ll_update_inode,
		 * where the initialization of the slave inode is slightly
		 * different, so it resets lsm_md to NULL to avoid
		 * initializing the lsm for the slave inode.
		 */
		/* For a migrating inode, the master stripe and master object
		 * will be the same, so we only need to assign this inode.
		 */
		if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
			lsm->lsm_md_oinfo[i].lmo_root = inode;
		else
			lsm->lsm_md_oinfo[i].lmo_root =
				ll_iget_anon_dir(inode->i_sb, fid, md);
		if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
			int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);

			lsm->lsm_md_oinfo[i].lmo_root = NULL;
			return rc;
		}
	}

	return 0;
}

static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
				const struct lmv_stripe_md *lsm_md2)
{
	return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
	       lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
	       lsm_md1->lsm_md_master_mdt_index ==
			lsm_md2->lsm_md_master_mdt_index &&
	       lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
	       lsm_md1->lsm_md_layout_version ==
			lsm_md2->lsm_md_layout_version &&
	       !strcmp(lsm_md1->lsm_md_pool_name,
		       lsm_md2->lsm_md_pool_name);
}

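/**
 * Update the directory striping (lsm_md) of an inode from an MDS reply.
 *
 * Three cases are handled: the reply carries no striping (including the
 * completion of a directory migration, where the temporary MIGRATE
 * layout is dropped), the layout is being set for the first time, or
 * the cached and replied layouts disagree, which is reported and
 * returned as -EIO.
 */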
static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lmv_stripe_md *lsm = md->lmv;
	int rc;

	LASSERT(S_ISDIR(inode->i_mode));
	CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
	       PFID(ll_inode2fid(inode)));

	/* no striped information from request. */
	if (!lsm) {
		if (!lli->lli_lsm_md) {
			return 0;
		} else if (lli->lli_lsm_md->lsm_md_hash_type &
			   LMV_HASH_FLAG_MIGRATION) {
			/*
			 * migration is done, the temporary MIGRATE layout has
			 * been removed
			 */
			CDEBUG(D_INODE, DFID" finish migration.\n",
			       PFID(ll_inode2fid(inode)));
			lmv_free_memmd(lli->lli_lsm_md);
			lli->lli_lsm_md = NULL;
			return 0;
		} else {
			/*
			 * The lustre_md from req does not include stripeEA,
			 * see ll_md_setattr
			 */
			return 0;
		}
	}

	/* set the directory layout */
	if (!lli->lli_lsm_md) {
		struct cl_attr *attr;

		rc = ll_init_lsm_md(inode, md);
		if (rc)
			return rc;

		/*
		 * set lsm_md to NULL, so the following free of lustre_md
		 * will not free this lsm
		 */
		md->lmv = NULL;
		lli->lli_lsm_md = lsm;

		attr = kzalloc(sizeof(*attr), GFP_NOFS);
		if (!attr)
			return -ENOMEM;

		/* validate the lsm */
		rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr,
				   ll_md_blocking_ast);
		if (rc) {
			kfree(attr);
			return rc;
		}

		if (md->body->mbo_valid & OBD_MD_FLNLINK)
			md->body->mbo_nlink = attr->cat_nlink;
		if (md->body->mbo_valid & OBD_MD_FLSIZE)
			md->body->mbo_size = attr->cat_size;
		if (md->body->mbo_valid & OBD_MD_FLATIME)
			md->body->mbo_atime = attr->cat_atime;
		if (md->body->mbo_valid & OBD_MD_FLCTIME)
			md->body->mbo_ctime = attr->cat_ctime;
		if (md->body->mbo_valid & OBD_MD_FLMTIME)
			md->body->mbo_mtime = attr->cat_mtime;

		kfree(attr);

		CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
		       lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
		return 0;
	}

	/* Compare the old and new stripe information */
	if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
		struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
		int idx;

		CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
		       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
		       inode, lsm, old_lsm,
		       lsm->lsm_md_magic, old_lsm->lsm_md_magic,
		       lsm->lsm_md_stripe_count,
		       old_lsm->lsm_md_stripe_count,
		       lsm->lsm_md_master_mdt_index,
		       old_lsm->lsm_md_master_mdt_index,
		       lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
		       lsm->lsm_md_layout_version,
		       old_lsm->lsm_md_layout_version,
		       lsm->lsm_md_pool_name,
		       old_lsm->lsm_md_pool_name);

		for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
			CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
			       PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
		}

		for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
			CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
			       PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
		}

		return -EIO;
	}

	return 0;
}

void ll_clear_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

	if (S_ISDIR(inode->i_mode)) {
		/* these should have been cleared in ll_file_release */
		LASSERT(!lli->lli_opendir_key);
		LASSERT(!lli->lli_sai);
		LASSERT(lli->lli_opendir_pid == 0);
	}

	md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));

	LASSERT(!lli->lli_open_fd_write_count);
	LASSERT(!lli->lli_open_fd_read_count);
	LASSERT(!lli->lli_open_fd_exec_count);

	if (lli->lli_mds_write_och)
		ll_md_real_close(inode, FMODE_WRITE);
	if (lli->lli_mds_exec_och)
		ll_md_real_close(inode, FMODE_EXEC);
	if (lli->lli_mds_read_och)
		ll_md_real_close(inode, FMODE_READ);

	if (S_ISLNK(inode->i_mode)) {
		kfree(lli->lli_symlink_name);
		lli->lli_symlink_name = NULL;
	}

	ll_xattr_cache_destroy(inode);

#ifdef CONFIG_FS_POSIX_ACL
	if (lli->lli_posix_acl) {
		posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = NULL;
	}
#endif
	lli->lli_inode_magic = LLI_INODE_DEAD;

	if (S_ISDIR(inode->i_mode))
		ll_dir_clear_lsm_md(inode);
	if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
		LASSERT(list_empty(&lli->lli_agl_list));

	/*
	 * XXX This has to be done before lsm is freed below, because
	 * cl_object still uses inode lsm.
	 */
	cl_inode_fini(inode);
}

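/*
 * Time-related iattr bits that mark a timestamp as explicitly supplied
 * by the caller; see their use in ll_md_setattr() and ll_setattr_raw()
 * below.
 */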
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)

static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
{
	struct lustre_md md;
	struct inode *inode = d_inode(dentry);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *request = NULL;
	int rc, ia_valid;

	op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
	if (rc) {
		ptlrpc_req_finished(request);
		if (rc == -ENOENT) {
			clear_nlink(inode);
			/* Unlinked special device node? Or just a race?
			 * Pretend we did everything.
			 */
			if (!S_ISREG(inode->i_mode) &&
			    !S_ISDIR(inode->i_mode)) {
				ia_valid = op_data->op_attr.ia_valid;
				op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
				rc = simple_setattr(dentry, &op_data->op_attr);
				op_data->op_attr.ia_valid = ia_valid;
			}
		} else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
			CERROR("md_setattr fails: rc = %d\n", rc);
		}
		return rc;
	}

	rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc) {
		ptlrpc_req_finished(request);
		return rc;
	}

	ia_valid = op_data->op_attr.ia_valid;
	/* The inode size will be set in cl_setattr_ost; we can't do it now
	 * since the dirty cache is not cleared yet.
	 */
	op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
	if (S_ISREG(inode->i_mode))
		inode_lock(inode);
	rc = simple_setattr(dentry, &op_data->op_attr);
	if (S_ISREG(inode->i_mode))
		inode_unlock(inode);
	op_data->op_attr.ia_valid = ia_valid;

	rc = ll_update_inode(inode, &md);
	ptlrpc_req_finished(request);

	return rc;
}

/* If this inode has objects allocated to it (lsm != NULL), then the OST
 * object(s) determine the file size and mtime. Otherwise, the MDS will
 * keep these values until such a time that objects are allocated for it.
 * We do the MDS operations first, as it is checking permissions for us.
 * We don't do the MDS RPC if there is nothing that we want to store there,
 * otherwise there is no harm in updating mtime/atime on the MDS if we are
 * going to do an RPC anyways.
 *
 * If we are doing a truncate, we will send the mtime and ctime updates
 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
 * at the same time.
 *
 * In case of HSM import, we only set attr on MDS.
 */
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
	struct inode *inode = d_inode(dentry);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data = NULL;
	int rc = 0;

	CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
	       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
	       i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);

	if (attr->ia_valid & ATTR_SIZE) {
		/* Check new size against VFS/VM file size limit and rlimit */
		rc = inode_newsize_ok(inode, attr->ia_size);
		if (rc)
			return rc;

		/* The maximum Lustre file size is variable, based on the
		 * OST maximum object size and number of stripes. This
		 * needs another check in addition to the VFS check above.
		 */
		if (attr->ia_size > ll_file_maxbytes(inode)) {
			CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
			       PFID(&lli->lli_fid), attr->ia_size,
			       ll_file_maxbytes(inode));
			return -EFBIG;
		}

		attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	/* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */
	if (attr->ia_valid & TIMES_SET_FLAGS) {
		if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
		    !capable(CFS_CAP_FOWNER))
			return -EPERM;
	}

	/* We mark all of the fields "set" so MDS/OST does not re-set them */
	if (attr->ia_valid & ATTR_CTIME) {
		attr->ia_ctime = CURRENT_TIME;
		attr->ia_valid |= ATTR_CTIME_SET;
	}
	if (!(attr->ia_valid & ATTR_ATIME_SET) &&
	    (attr->ia_valid & ATTR_ATIME)) {
		attr->ia_atime = CURRENT_TIME;
		attr->ia_valid |= ATTR_ATIME_SET;
	}
	if (!(attr->ia_valid & ATTR_MTIME_SET) &&
	    (attr->ia_valid & ATTR_MTIME)) {
		attr->ia_mtime = CURRENT_TIME;
		attr->ia_valid |= ATTR_MTIME_SET;
	}

	if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
		CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
		       LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
		       (s64)ktime_get_real_seconds());

	if (S_ISREG(inode->i_mode))
		inode_unlock(inode);

	/*
	 * We always do an MDS RPC, even if we're only changing the size;
	 * only the MDS knows whether truncate() should fail with -ETXTBUSY
	 */
	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data) {
		rc = -ENOMEM;
		goto out;
	}

	op_data->op_attr = *attr;

	if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
		/*
		 * If we are changing the file size, the file content is
		 * modified, flag it.
		 */
		attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
		op_data->op_bias |= MDS_DATA_MODIFIED;
	}

	rc = ll_md_setattr(dentry, op_data);
	if (rc)
		goto out;

	if (!S_ISREG(inode->i_mode) || hsm_import) {
		rc = 0;
		goto out;
	}

	if (attr->ia_valid & (ATTR_SIZE |
			      ATTR_ATIME | ATTR_ATIME_SET |
			      ATTR_MTIME | ATTR_MTIME_SET)) {
		/* For truncate and utimes sending attributes to OSTs, setting
		 * mtime/atime to the past will be performed under PW [0:EOF]
		 * extent lock (new_size:EOF for truncate). It may seem
		 * excessive to send mtime/atime updates to OSTs when not
		 * setting times to past, but it is necessary due to possible
		 * time de-synchronization between MDT inode and OST objects
		 */
		rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
	}

	/*
	 * If the file was restored, it needs to set dirty flag.
	 *
	 * We've already sent MDS_DATA_MODIFIED flag in
	 * ll_md_setattr() for truncate. However, the MDT refuses to
	 * set the HS_DIRTY flag on released files, so we have to set
	 * it again if the file has been restored. Please check how
	 * LLIF_DATA_MODIFIED is set in vvp_io_setattr_fini().
	 *
	 * Please notice that if the file is not released, the previous
	 * MDS_DATA_MODIFIED has taken effect and usually
	 * LLIF_DATA_MODIFIED is not set (see vvp_io_setattr_fini()).
	 * This way we can save an RPC for the common open + trunc
	 * operation.
	 */
	if (test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags)) {
		struct hsm_state_set hss = {
			.hss_valid = HSS_SETMASK,
			.hss_setmask = HS_DIRTY,
		};
		int rc2;

		rc2 = ll_hsm_state_set(inode, &hss);
		if (rc2 < 0)
			CERROR(DFID "HSM set dirty failed: rc2 = %d\n",
			       PFID(ll_inode2fid(inode)), rc2);
	}

out:
	if (op_data)
		ll_finish_md_op_data(op_data);

	if (S_ISREG(inode->i_mode)) {
		inode_lock(inode);
		if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
			inode_dio_wait(inode);
	}

	ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
			   LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);

	return rc;
}

int ll_setattr(struct dentry *de, struct iattr *attr)
{
	int mode = d_inode(de)->i_mode;

	if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
	    (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
		attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;

	if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
	     (ATTR_SIZE | ATTR_MODE)) &&
	    (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
	     (((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
	      !(attr->ia_mode & S_ISGID))))
		attr->ia_valid |= ATTR_FORCE;

	if ((attr->ia_valid & ATTR_MODE) &&
	    (mode & S_ISUID) &&
	    !(attr->ia_mode & S_ISUID) &&
	    !(attr->ia_valid & ATTR_KILL_SUID))
		attr->ia_valid |= ATTR_KILL_SUID;

	if ((attr->ia_valid & ATTR_MODE) &&
	    ((mode & (S_ISGID | 0010)) == (S_ISGID | 0010)) &&
	    !(attr->ia_mode & S_ISGID) &&
	    !(attr->ia_valid & ATTR_KILL_SGID))
		attr->ia_valid |= ATTR_KILL_SGID;

	return ll_setattr_raw(de, attr, false);
}

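/**
 * Gather filesystem statistics from both the metadata and data servers.
 *
 * The MDC statfs provides the inode counts and the OSC statfs provides
 * the block counts; if fewer objects are free on the OSTs than inodes
 * on the MDS, the reported inode totals are reduced accordingly so that
 * the "inodes in use" number stays correct.
 */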
1613int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
1614 __u64 max_age, __u32 flags)
1615{
1616 struct ll_sb_info *sbi = ll_s2sbi(sb);
1617 struct obd_statfs obd_osfs;
1618 int rc;
d7e09d03
PT
1619
1620 rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
1621 if (rc) {
1622 CERROR("md_statfs fails: rc = %d\n", rc);
0a3bdb00 1623 return rc;
d7e09d03
PT
1624 }
1625
1626 osfs->os_type = sb->s_magic;
1627
b0f5aad5 1628 CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
1d8cb70c
GD
1629 osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
1630 osfs->os_files);
d7e09d03
PT
1631
1632 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
1633 flags |= OBD_STATFS_NODELAY;
1634
1635 rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
1636 if (rc) {
1637 CERROR("obd_statfs fails: rc = %d\n", rc);
0a3bdb00 1638 return rc;
d7e09d03
PT
1639 }
1640
b0f5aad5 1641 CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
d7e09d03
PT
1642 obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
1643 obd_osfs.os_files);
1644
1645 osfs->os_bsize = obd_osfs.os_bsize;
1646 osfs->os_blocks = obd_osfs.os_blocks;
1647 osfs->os_bfree = obd_osfs.os_bfree;
1648 osfs->os_bavail = obd_osfs.os_bavail;
1649
1650 /* If we don't have as many objects free on the OST as inodes
1651 * on the MDS, we reduce the total number of inodes to
1652 * compensate, so that the "inodes in use" number is correct.
1653 */
1654 if (obd_osfs.os_ffree < osfs->os_ffree) {
1655 osfs->os_files = (osfs->os_files - osfs->os_ffree) +
1656 obd_osfs.os_ffree;
1657 osfs->os_ffree = obd_osfs.os_ffree;
1658 }
1659
0a3bdb00 1660 return rc;
d7e09d03 1661}
c9f6bb96 1662
d7e09d03
PT
1663int ll_statfs(struct dentry *de, struct kstatfs *sfs)
1664{
1665 struct super_block *sb = de->d_sb;
1666 struct obd_statfs osfs;
1667 int rc;
1668
b0f5aad5 1669 CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
d7e09d03
PT
1670 ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
1671
1672 /* Some amount of caching on the client is allowed */
1673 rc = ll_statfs_internal(sb, &osfs,
1674 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
1675 0);
1676 if (rc)
1677 return rc;
1678
1679 statfs_unpack(sfs, &osfs);
1680
1681 /* We need to downshift for all 32-bit kernels, because we can't
1682 * tell if the kernel is being called via sys_statfs64() or not.
1683 * Stop before overflowing f_bsize - in which case it is better
c0894c6c
OD
1684 * to just risk EOVERFLOW if caller is using old sys_statfs().
1685 */
d7e09d03
PT
1686 if (sizeof(long) < 8) {
1687 while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
1688 sfs->f_bsize <<= 1;
1689
1690 osfs.os_blocks >>= 1;
1691 osfs.os_bfree >>= 1;
1692 osfs.os_bavail >>= 1;
1693 }
1694 }
1695
1696 sfs->f_blocks = osfs.os_blocks;
1697 sfs->f_bfree = osfs.os_bfree;
1698 sfs->f_bavail = osfs.os_bavail;
bd994071 1699 sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
d7e09d03
PT
1700 return 0;
1701}
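/*
 * Worked example for the 32-bit downshift above: with
 * osfs.os_blocks = 0x180000000 (4 KiB blocks, 24 TiB in total) and
 * sfs->f_bsize = 4096, one loop iteration yields f_bsize = 8192 and
 * f_blocks = 0xc0000000, which fits in a 32-bit long while the byte
 * total (f_bsize * f_blocks) stays the same.
 */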
1702
1703void ll_inode_size_lock(struct inode *inode)
1704{
1705 struct ll_inode_info *lli;
1706
1707 LASSERT(!S_ISDIR(inode->i_mode));
1708
1709 lli = ll_i2info(inode);
47a57bde 1710 mutex_lock(&lli->lli_size_mutex);
d7e09d03
PT
1711}
1712
1713void ll_inode_size_unlock(struct inode *inode)
1714{
1715 struct ll_inode_info *lli;
1716
1717 lli = ll_i2info(inode);
47a57bde 1718 mutex_unlock(&lli->lli_size_mutex);
d7e09d03
PT
1719}
1720
c3397e7e 1721int ll_update_inode(struct inode *inode, struct lustre_md *md)
d7e09d03
PT
1722{
1723 struct ll_inode_info *lli = ll_i2info(inode);
1724 struct mdt_body *body = md->body;
d7e09d03
PT
1725 struct ll_sb_info *sbi = ll_i2sbi(inode);
1726
55051039 1727 if (body->mbo_valid & OBD_MD_FLEASIZE)
85cb63bc 1728 cl_file_inode_init(inode, md);
d7e09d03 1729
c3397e7e 1730 if (S_ISDIR(inode->i_mode)) {
1731 int rc;
1732
1733 rc = ll_update_lsm_md(inode, md);
1734 if (rc)
1735 return rc;
1736 }
2de35386 1737
d7e09d03 1738#ifdef CONFIG_FS_POSIX_ACL
2e1b5b8b 1739 if (body->mbo_valid & OBD_MD_FLACL) {
d7e09d03
PT
1740 spin_lock(&lli->lli_lock);
1741 if (lli->lli_posix_acl)
1742 posix_acl_release(lli->lli_posix_acl);
1743 lli->lli_posix_acl = md->posix_acl;
1744 spin_unlock(&lli->lli_lock);
1745 }
1746#endif
2e1b5b8b 1747 inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
c1e2699d 1748 sbi->ll_flags & LL_SBI_32BIT_API);
2e1b5b8b 1749 inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);
d7e09d03 1750
2e1b5b8b
JH
1751 if (body->mbo_valid & OBD_MD_FLATIME) {
1752 if (body->mbo_atime > LTIME_S(inode->i_atime))
1753 LTIME_S(inode->i_atime) = body->mbo_atime;
1754 lli->lli_atime = body->mbo_atime;
d7e09d03 1755 }
2e1b5b8b
JH
1756 if (body->mbo_valid & OBD_MD_FLMTIME) {
1757 if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
b0f5aad5
GKH
1758 CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
1759 inode->i_ino, LTIME_S(inode->i_mtime),
2e1b5b8b
JH
1760 body->mbo_mtime);
1761 LTIME_S(inode->i_mtime) = body->mbo_mtime;
d7e09d03 1762 }
2e1b5b8b 1763 lli->lli_mtime = body->mbo_mtime;
d7e09d03 1764 }
2e1b5b8b
JH
1765 if (body->mbo_valid & OBD_MD_FLCTIME) {
1766 if (body->mbo_ctime > LTIME_S(inode->i_ctime))
1767 LTIME_S(inode->i_ctime) = body->mbo_ctime;
1768 lli->lli_ctime = body->mbo_ctime;
d7e09d03 1769 }
2e1b5b8b 1770 if (body->mbo_valid & OBD_MD_FLMODE)
cd94f231
OD
1771 inode->i_mode = (inode->i_mode & S_IFMT) |
1772 (body->mbo_mode & ~S_IFMT);
2e1b5b8b 1773 if (body->mbo_valid & OBD_MD_FLTYPE)
cd94f231
OD
1774 inode->i_mode = (inode->i_mode & ~S_IFMT) |
1775 (body->mbo_mode & S_IFMT);
d7e09d03 1776 LASSERT(inode->i_mode != 0);
566be54d 1777 if (S_ISREG(inode->i_mode))
e6768831
TJ
1778 inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
1779 LL_MAX_BLKSIZE_BITS);
566be54d 1780 else
d7e09d03 1781 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
2e1b5b8b
JH
1782 if (body->mbo_valid & OBD_MD_FLUID)
1783 inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
1784 if (body->mbo_valid & OBD_MD_FLGID)
1785 inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
1786 if (body->mbo_valid & OBD_MD_FLFLAGS)
1787 inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags);
1788 if (body->mbo_valid & OBD_MD_FLNLINK)
1789 set_nlink(inode, body->mbo_nlink);
1790 if (body->mbo_valid & OBD_MD_FLRDEV)
1791 inode->i_rdev = old_decode_dev(body->mbo_rdev);
1792
1793 if (body->mbo_valid & OBD_MD_FLID) {
d7e09d03
PT
1794 /* FID shouldn't be changed! */
1795 if (fid_is_sane(&lli->lli_fid)) {
2e1b5b8b 1796 LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
97a075cd 1797 "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
2e1b5b8b 1798 PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
97a075cd 1799 PFID(ll_inode2fid(inode)), inode);
da5ecb4d 1800 } else {
2e1b5b8b 1801 lli->lli_fid = body->mbo_fid1;
da5ecb4d 1802 }
d7e09d03
PT
1803 }
1804
1805 LASSERT(fid_seq(&lli->lli_fid) != 0);
1806
2e1b5b8b 1807 if (body->mbo_valid & OBD_MD_FLSIZE) {
0cd99931 1808 i_size_write(inode, body->mbo_size);
d7e09d03 1809
0cd99931
JH
1810 CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
1811 PFID(ll_inode2fid(inode)),
1812 (unsigned long long)body->mbo_size);
d7e09d03 1813
2e1b5b8b
JH
1814 if (body->mbo_valid & OBD_MD_FLBLOCKS)
1815 inode->i_blocks = body->mbo_blocks;
d7e09d03
PT
1816 }
1817
2e1b5b8b
JH
1818 if (body->mbo_valid & OBD_MD_TSTATE) {
1819 if (body->mbo_t_state & MS_RESTORE)
219c0c45 1820 set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
5ea17d6c 1821 }
c3397e7e 1822
1823 return 0;
d7e09d03
PT
1824}
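/*
 * Example of the timestamp merge above: if the in-core inode already
 * has i_mtime = 1500000100 and the mdt_body carries mbo_mtime =
 * 1500000050, i_mtime is left alone because it is newer, but
 * lli_mtime is still updated to 1500000050 so it tracks the MDS view.
 */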
1825
c3397e7e 1826int ll_read_inode2(struct inode *inode, void *opaque)
d7e09d03
PT
1827{
1828 struct lustre_md *md = opaque;
1829 struct ll_inode_info *lli = ll_i2info(inode);
c3397e7e 1830 int rc;
d7e09d03
PT
1831
1832 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1833 PFID(&lli->lli_fid), inode);
1834
d7e09d03
PT
1835 /* Core attributes from the MDS first. This is a new inode, and
1836 * the VFS doesn't zero times in the core inode so we have to do
1837 * it ourselves. They will be overwritten by either MDS or OST
c0894c6c
OD
1838 * attributes - we just need to make sure they aren't newer.
1839 */
d7e09d03
PT
1840 LTIME_S(inode->i_mtime) = 0;
1841 LTIME_S(inode->i_atime) = 0;
1842 LTIME_S(inode->i_ctime) = 0;
1843 inode->i_rdev = 0;
c3397e7e 1844 rc = ll_update_inode(inode, md);
1845 if (rc)
1846 return rc;
d7e09d03
PT
1847
1848 /* OIDEBUG(inode); */
1849
d7e09d03
PT
1850 if (S_ISREG(inode->i_mode)) {
1851 struct ll_sb_info *sbi = ll_i2sbi(inode);
cf29a7b6 1852
d7e09d03
PT
1853 inode->i_op = &ll_file_inode_operations;
1854 inode->i_fop = sbi->ll_fop;
1855 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
d7e09d03
PT
1856 } else if (S_ISDIR(inode->i_mode)) {
1857 inode->i_op = &ll_dir_inode_operations;
1858 inode->i_fop = &ll_dir_operations;
d7e09d03
PT
1859 } else if (S_ISLNK(inode->i_mode)) {
1860 inode->i_op = &ll_fast_symlink_inode_operations;
d7e09d03
PT
1861 } else {
1862 inode->i_op = &ll_special_inode_operations;
1863
1864 init_special_inode(inode, inode->i_mode,
1865 inode->i_rdev);
d7e09d03 1866 }
c3397e7e 1867
1868 return 0;
d7e09d03
PT
1869}
1870
1871void ll_delete_inode(struct inode *inode)
1872{
1929c433 1873 struct ll_inode_info *lli = ll_i2info(inode);
d7e09d03 1874
6e16818b 1875 if (S_ISREG(inode->i_mode) && lli->lli_clob)
d7e09d03 1876 /* discard all dirty pages before truncating them, required by
c0894c6c
OD
1877 * osc_extent implementation at LU-1030.
1878 */
65fb55d1 1879 cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
7510c5ca 1880 CL_FSYNC_LOCAL, 1);
d7e09d03 1881
91b0abe3 1882 truncate_inode_pages_final(&inode->i_data);
d7e09d03 1883
7510c5ca
YS
1884 LASSERTF(!inode->i_data.nrpages,
1885 "inode=" DFID "(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
1886 PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages);
d7e09d03
PT
1887
1888 ll_clear_inode(inode);
1889 clear_inode(inode);
d7e09d03
PT
1890}
1891
1892int ll_iocontrol(struct inode *inode, struct file *file,
1893 unsigned int cmd, unsigned long arg)
1894{
1895 struct ll_sb_info *sbi = ll_i2sbi(inode);
1896 struct ptlrpc_request *req = NULL;
1897 int rc, flags = 0;
d7e09d03 1898
a58a38ac 1899 switch (cmd) {
d7e09d03
PT
1900 case FSFILT_IOC_GETFLAGS: {
1901 struct mdt_body *body;
1902 struct md_op_data *op_data;
1903
1904 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
1905 0, 0, LUSTRE_OPC_ANY,
1906 NULL);
1907 if (IS_ERR(op_data))
0a3bdb00 1908 return PTR_ERR(op_data);
d7e09d03
PT
1909
1910 op_data->op_valid = OBD_MD_FLFLAGS;
1911 rc = md_getattr(sbi->ll_md_exp, op_data, &req);
1912 ll_finish_md_op_data(op_data);
1913 if (rc) {
97a075cd
JN
1914 CERROR("%s: failure inode "DFID": rc = %d\n",
1915 sbi->ll_md_exp->exp_obd->obd_name,
1916 PFID(ll_inode2fid(inode)), rc);
0a3bdb00 1917 return -abs(rc);
d7e09d03
PT
1918 }
1919
1920 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1921
2e1b5b8b 1922 flags = body->mbo_flags;
d7e09d03
PT
1923
1924 ptlrpc_req_finished(req);
1925
7ac5db21 1926 return put_user(flags, (int __user *)arg);
d7e09d03
PT
1927 }
1928 case FSFILT_IOC_SETFLAGS: {
d7e09d03 1929 struct md_op_data *op_data;
933eb397
JH
1930 struct cl_object *obj;
1931 struct iattr *attr;
d7e09d03 1932
7ac5db21 1933 if (get_user(flags, (int __user *)arg))
0a3bdb00 1934 return -EFAULT;
d7e09d03
PT
1935
1936 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
1937 LUSTRE_OPC_ANY, NULL);
1938 if (IS_ERR(op_data))
0a3bdb00 1939 return PTR_ERR(op_data);
d7e09d03 1940
bb41292b 1941 op_data->op_attr_flags = flags;
d7e09d03 1942 op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
f28f1a45 1943 rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
d7e09d03
PT
1944 ll_finish_md_op_data(op_data);
1945 ptlrpc_req_finished(req);
1946 if (rc)
0a3bdb00 1947 return rc;
d7e09d03
PT
1948
1949 inode->i_flags = ll_ext_to_inode_flags(flags);
1950
933eb397
JH
1951 obj = ll_i2info(inode)->lli_clob;
1952 if (!obj)
0a3bdb00 1953 return 0;
d7e09d03 1954
933eb397
JH
1955 attr = kzalloc(sizeof(*attr), GFP_NOFS);
1956 if (!attr)
0a3bdb00 1957 return -ENOMEM;
d7e09d03 1958
933eb397
JH
1959 attr->ia_valid = ATTR_ATTR_FLAG;
1960 rc = cl_setattr_ost(obj, attr, flags);
1961 kfree(attr);
0a3bdb00 1962 return rc;
d7e09d03
PT
1963 }
1964 default:
0a3bdb00 1965 return -ENOSYS;
d7e09d03
PT
1966 }
1967
0a3bdb00 1968 return 0;
d7e09d03
PT
1969}
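/*
 * Note on the FSFILT_IOC_SETFLAGS path above: the new flags are first
 * committed on the MDT via md_setattr() and only then propagated to
 * the OST objects through cl_setattr_ost(); a file that has no data
 * object yet (lli_clob == NULL) stops after the MDT update.
 */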
1970
1971int ll_flush_ctx(struct inode *inode)
1972{
1973 struct ll_sb_info *sbi = ll_i2sbi(inode);
1974
4b1a25f0 1975 CDEBUG(D_SEC, "flush context for user %d\n",
e15ba45d 1976 from_kuid(&init_user_ns, current_uid()));
d7e09d03
PT
1977
1978 obd_set_info_async(NULL, sbi->ll_md_exp,
1979 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1980 0, NULL, NULL);
1981 obd_set_info_async(NULL, sbi->ll_dt_exp,
1982 sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
1983 0, NULL, NULL);
1984 return 0;
1985}
1986
1987/* umount -f client means force down, don't save state */
1988void ll_umount_begin(struct super_block *sb)
1989{
1990 struct ll_sb_info *sbi = ll_s2sbi(sb);
1991 struct obd_device *obd;
1992 struct obd_ioctl_data *ioc_data;
d7e09d03
PT
1993
1994 CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
1995 sb->s_count, atomic_read(&sb->s_active));
1996
1997 obd = class_exp2obd(sbi->ll_md_exp);
6e16818b 1998 if (!obd) {
55f5a824 1999 CERROR("Invalid MDC connection handle %#llx\n",
d7e09d03 2000 sbi->ll_md_exp->exp_handle.h_cookie);
d7e09d03
PT
2001 return;
2002 }
2003 obd->obd_force = 1;
2004
2005 obd = class_exp2obd(sbi->ll_dt_exp);
6e16818b 2006 if (!obd) {
55f5a824 2007 CERROR("Invalid LOV connection handle %#llx\n",
d7e09d03 2008 sbi->ll_dt_exp->exp_handle.h_cookie);
d7e09d03
PT
2009 return;
2010 }
2011 obd->obd_force = 1;
2012
496a51bd 2013 ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
d7e09d03
PT
2014 if (ioc_data) {
2015 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
ec83e611 2016 sizeof(*ioc_data), ioc_data, NULL);
d7e09d03
PT
2017
2018 obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
ec83e611 2019 sizeof(*ioc_data), ioc_data, NULL);
d7e09d03 2020
97903a26 2021 kfree(ioc_data);
d7e09d03
PT
2022 }
2023
d7e09d03
PT
2024 /* Really, we'd like to wait until there are no requests outstanding,
2025 * and then continue. For now, we just invalidate the requests,
2026 * schedule() and sleep one second if needed, and hope.
2027 */
2028 schedule();
d7e09d03
PT
2029}
2030
2031int ll_remount_fs(struct super_block *sb, int *flags, char *data)
2032{
2033 struct ll_sb_info *sbi = ll_s2sbi(sb);
2034 char *profilenm = get_profile_name(sb);
2035 int err;
2036 __u32 read_only;
2037
2038 if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
2039 read_only = *flags & MS_RDONLY;
2040 err = obd_set_info_async(NULL, sbi->ll_md_exp,
2041 sizeof(KEY_READ_ONLY),
2042 KEY_READ_ONLY, sizeof(read_only),
2043 &read_only, NULL);
2044 if (err) {
2045 LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
2046 profilenm, read_only ?
2047 "read-only" : "read-write", err);
2048 return err;
2049 }
2050
2051 if (read_only)
2052 sb->s_flags |= MS_RDONLY;
2053 else
2054 sb->s_flags &= ~MS_RDONLY;
2055
2056 if (sbi->ll_flags & LL_SBI_VERBOSE)
2057 LCONSOLE_WARN("Remounted %s %s\n", profilenm,
2058 read_only ? "read-only" : "read-write");
2059 }
2060 return 0;
2061}
2062
44ecac68
FY
2063/**
 2064 * Clean up the open handle that is cached on the MDT side.
 2065 *
 2066 * In the open case, the client-side open handling thread may hit an
 2067 * error after the MDT has granted the open. In that case, the client
 2068 * should send a close RPC to the MDT as cleanup; otherwise, the open
 2069 * handle is leaked on the MDT until the client unmounts or is evicted.
 2070 *
 2071 * Furthermore, if someone unlinks the file, the open handle still
 2072 * holds a reference on that file/object and will block subsequent
 2073 * threads that want to locate the object via FID.
2074 *
2075 * \param[in] sb super block for this file-system
2076 * \param[in] open_req pointer to the original open request
2077 */
2078void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
2079{
2080 struct mdt_body *body;
2081 struct md_op_data *op_data;
2082 struct ptlrpc_request *close_req = NULL;
2083 struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;
2084
2085 body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
af13af52 2086 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
83ea341d 2087 if (!op_data)
44ecac68 2088 return;
44ecac68 2089
2e1b5b8b 2090 op_data->op_fid1 = body->mbo_fid1;
2e1b5b8b 2091 op_data->op_handle = body->mbo_handle;
44ecac68
FY
2092 op_data->op_mod_time = get_seconds();
2093 md_close(exp, op_data, NULL, &close_req);
2094 ptlrpc_req_finished(close_req);
2095 ll_finish_md_op_data(op_data);
2096}
2097
d7e09d03
PT
2098int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
2099 struct super_block *sb, struct lookup_intent *it)
2100{
2101 struct ll_sb_info *sbi = NULL;
24af3e16 2102 struct lustre_md md = { NULL };
d7e09d03 2103 int rc;
d7e09d03
PT
2104
2105 LASSERT(*inode || sb);
2106 sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
2107 rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
2108 sbi->ll_md_exp, &md);
2109 if (rc)
44ecac68 2110 goto cleanup;
d7e09d03
PT
2111
2112 if (*inode) {
c3397e7e 2113 rc = ll_update_inode(*inode, &md);
2114 if (rc)
2115 goto out;
d7e09d03 2116 } else {
6e16818b 2117 LASSERT(sb);
d7e09d03
PT
2118
2119 /*
 2120 * At this point the server returns the same FID that the client
 2121 * generated for the create, so using ->fid1 is okay here.
2122 */
2e1b5b8b 2123 if (!fid_is_sane(&md.body->mbo_fid1)) {
c681528a
SC
2124 CERROR("%s: Fid is insane " DFID "\n",
2125 ll_get_fsname(sb, NULL, 0),
2e1b5b8b 2126 PFID(&md.body->mbo_fid1));
c681528a
SC
2127 rc = -EINVAL;
2128 goto out;
2129 }
d7e09d03 2130
2e1b5b8b 2131 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
c1e2699d 2132 sbi->ll_flags & LL_SBI_32BIT_API),
d7e09d03 2133 &md);
c3397e7e 2134 if (IS_ERR(*inode)) {
d7e09d03
PT
2135#ifdef CONFIG_FS_POSIX_ACL
2136 if (md.posix_acl) {
2137 posix_acl_release(md.posix_acl);
2138 md.posix_acl = NULL;
2139 }
2140#endif
020ecc6f 2141 rc = -ENOMEM;
d7e09d03 2142 CERROR("new_inode -fatal: rc %d\n", rc);
34e1f2bb 2143 goto out;
d7e09d03
PT
2144 }
2145 }
2146
 2147	/* Handle a piggybacked layout lock.
 2148	 * A layout lock can be piggybacked on getattr and open requests.
 2149	 * The lsm can be applied to the inode only if it comes with a layout
 2150	 * lock; otherwise the correct layout may be overwritten, for example:
 2151	 * 1. proc1: the MDT returns an lsm without granting the layout lock
 2152	 * 2. the layout is changed by another client
 2153	 * 3. proc2: refreshes the layout and is granted the layout lock
c0894c6c
OD
 2154	 * 4. proc1: applies the now-stale layout
2155 */
e476f2e5 2156 if (it && it->it_lock_mode != 0) {
d7e09d03
PT
2157 struct lustre_handle lockh;
2158 struct ldlm_lock *lock;
2159
e476f2e5 2160 lockh.cookie = it->it_lock_handle;
d7e09d03 2161 lock = ldlm_handle2lock(&lockh);
6e16818b 2162 LASSERT(lock);
d7e09d03
PT
2163 if (ldlm_has_layout(lock)) {
2164 struct cl_object_conf conf;
2165
2166 memset(&conf, 0, sizeof(conf));
2167 conf.coc_opc = OBJECT_CONF_SET;
2168 conf.coc_inode = *inode;
2169 conf.coc_lock = lock;
55051039 2170 conf.u.coc_layout = md.layout;
d7e09d03
PT
2171 (void)ll_layout_conf(*inode, &conf);
2172 }
2173 LDLM_LOCK_PUT(lock);
2174 }
2175
2176out:
d7e09d03 2177 md_free_lustre_md(sbi->ll_md_exp, &md);
44ecac68
FY
2178cleanup:
2179 if (rc != 0 && it && it->it_op & IT_OPEN)
2180 ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
2181
0a3bdb00 2182 return rc;
d7e09d03
PT
2183}
2184
4c6243ec 2185int ll_obd_statfs(struct inode *inode, void __user *arg)
d7e09d03
PT
2186{
2187 struct ll_sb_info *sbi = NULL;
2188 struct obd_export *exp;
2189 char *buf = NULL;
2190 struct obd_ioctl_data *data = NULL;
2191 __u32 type;
d7e09d03
PT
2192 int len = 0, rc;
2193
c650ba73
TR
2194 if (!inode) {
2195 rc = -EINVAL;
2196 goto out_statfs;
2197 }
2198
2199 sbi = ll_i2sbi(inode);
2200 if (!sbi) {
34e1f2bb
JL
2201 rc = -EINVAL;
2202 goto out_statfs;
2203 }
d7e09d03
PT
2204
2205 rc = obd_ioctl_getdata(&buf, &len, arg);
2206 if (rc)
34e1f2bb 2207 goto out_statfs;
d7e09d03 2208
bdbb0512 2209 data = (void *)buf;
d7e09d03 2210 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
34e1f2bb
JL
2211 !data->ioc_pbuf1 || !data->ioc_pbuf2) {
2212 rc = -EINVAL;
2213 goto out_statfs;
2214 }
d7e09d03
PT
2215
2216 if (data->ioc_inllen1 != sizeof(__u32) ||
2217 data->ioc_inllen2 != sizeof(__u32) ||
2218 data->ioc_plen1 != sizeof(struct obd_statfs) ||
34e1f2bb
JL
2219 data->ioc_plen2 != sizeof(struct obd_uuid)) {
2220 rc = -EINVAL;
2221 goto out_statfs;
2222 }
d7e09d03
PT
2223
2224 memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
da5ecb4d 2225 if (type & LL_STATFS_LMV) {
d7e09d03 2226 exp = sbi->ll_md_exp;
da5ecb4d 2227 } else if (type & LL_STATFS_LOV) {
d7e09d03 2228 exp = sbi->ll_dt_exp;
da5ecb4d 2229 } else {
34e1f2bb
JL
2230 rc = -ENODEV;
2231 goto out_statfs;
2232 }
d7e09d03 2233
44164fc9 2234 rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
d7e09d03 2235 if (rc)
34e1f2bb 2236 goto out_statfs;
d7e09d03
PT
2237out_statfs:
2238 if (buf)
2239 obd_ioctl_freedata(buf, len);
2240 return rc;
2241}
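/*
 * Sketch of the obd_ioctl_data layout that ll_obd_statfs() above
 * expects a caller to pack: inlbuf1 holds a __u32 type (LL_STATFS_LMV
 * or LL_STATFS_LOV), inlbuf2 a second __u32 consumed by the lower
 * obd_iocontrol() (assumed here to be the target index), pbuf1 a
 * struct obd_statfs for the result and pbuf2 a struct obd_uuid; any
 * other sizes are rejected with -EINVAL.
 */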
2242
2243int ll_process_config(struct lustre_cfg *lcfg)
2244{
2245 char *ptr;
2246 void *sb;
2247 struct lprocfs_static_vars lvars;
2248 unsigned long x;
2249 int rc = 0;
2250
2251 lprocfs_llite_init_vars(&lvars);
2252
2253 /* The instance name contains the sb: lustre-client-aacfe000 */
2254 ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
2255 if (!ptr || !*(++ptr))
2256 return -EINVAL;
692f2b6c 2257 rc = kstrtoul(ptr, 16, &x);
2258 if (rc != 0)
d7e09d03
PT
2259 return -EINVAL;
2260 sb = (void *)x;
2261 /* This better be a real Lustre superblock! */
2262 LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
2263
2264 /* Note we have not called client_common_fill_super yet, so
c0894c6c
OD
2265 * proc fns must be able to handle that!
2266 */
d7e09d03
PT
2267 rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
2268 lcfg, sb);
2269 if (rc > 0)
2270 rc = 0;
fbe7c6c7 2271 return rc;
d7e09d03
PT
2272}
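/*
 * Instance-name parsing example for ll_process_config() above: a
 * config record carrying "lustre-client-aacfe000" is split at the last
 * '-', the trailing hex string is parsed with kstrtoul(..., 16, ...)
 * and reinterpreted as the struct super_block pointer (0xaacfe000)
 * that was recorded when the profile instance was set up.
 */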
2273
 2274/* This function prepares the md_op_data hint for passing it down to the MD stack. */
aff9d8e8 2275struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
e15ba45d 2276 struct inode *i1, struct inode *i2,
930f60e6
DE
2277 const char *name, size_t namelen,
2278 u32 mode, __u32 opc, void *data)
d7e09d03 2279{
d097d67b
JH
2280 if (!name) {
2281 /* Do not reuse namelen for something else. */
2282 if (namelen)
2283 return ERR_PTR(-EINVAL);
2284 } else {
2285 if (namelen > ll_i2sbi(i1)->ll_namelen)
2286 return ERR_PTR(-ENAMETOOLONG);
2287
2288 if (!lu_name_is_valid_2(name, namelen))
2289 return ERR_PTR(-EINVAL);
2290 }
d7e09d03 2291
6e16818b 2292 if (!op_data)
496a51bd 2293 op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
d7e09d03 2294
6e16818b 2295 if (!op_data)
d7e09d03
PT
2296 return ERR_PTR(-ENOMEM);
2297
2298 ll_i2gids(op_data->op_suppgids, i1, i2);
2299 op_data->op_fid1 = *ll_inode2fid(i1);
d81e9009 2300 op_data->op_default_stripe_offset = -1;
2301 if (S_ISDIR(i1->i_mode)) {
2de35386 2302 op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
d0d3caae 2303 if (opc == LUSTRE_OPC_MKDIR)
2304 op_data->op_default_stripe_offset =
2305 ll_i2info(i1)->lli_def_stripe_offset;
d81e9009 2306 }
d7e09d03 2307
1c12cf63 2308 if (i2) {
d7e09d03 2309 op_data->op_fid2 = *ll_inode2fid(i2);
1c12cf63 2310 if (S_ISDIR(i2->i_mode))
2de35386 2311 op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
1c12cf63 2312 } else {
d7e09d03 2313 fid_zero(&op_data->op_fid2);
1c12cf63 2314 }
2315
2316 if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
2317 op_data->op_cli_flags |= CLI_HASH64;
2318
2319 if (ll_need_32bit_api(ll_i2sbi(i1)))
2320 op_data->op_cli_flags |= CLI_API32;
d7e09d03
PT
2321
2322 op_data->op_name = name;
2323 op_data->op_namelen = namelen;
2324 op_data->op_mode = mode;
14e3f92a 2325 op_data->op_mod_time = ktime_get_real_seconds();
4b1a25f0
PT
2326 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2327 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
d7e09d03 2328 op_data->op_cap = cfs_curproc_cap_pack();
6e16818b 2329 if ((opc == LUSTRE_OPC_CREATE) && name &&
1d62e09c 2330 filename_is_volatile(name, namelen, &op_data->op_mds))
d7e09d03 2331 op_data->op_bias |= MDS_CREATE_VOLATILE;
1d62e09c 2332 else
2333 op_data->op_mds = 0;
d7e09d03
PT
2334 op_data->op_data = data;
2335
d7e09d03
PT
2336 return op_data;
2337}
2338
2339void ll_finish_md_op_data(struct md_op_data *op_data)
2340{
97903a26 2341 kfree(op_data);
d7e09d03
PT
2342}
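/*
 * Minimal usage sketch for the two helpers above, following the same
 * pattern as the callers in this file (error handling trimmed):
 *
 *	struct md_op_data *op_data;
 *
 *	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
 *				     LUSTRE_OPC_ANY, NULL);
 *	if (IS_ERR(op_data))
 *		return PTR_ERR(op_data);
 *	... issue the MD request using op_data ...
 *	ll_finish_md_op_data(op_data);
 */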
2343
2344int ll_show_options(struct seq_file *seq, struct dentry *dentry)
2345{
2346 struct ll_sb_info *sbi;
2347
6e16818b 2348 LASSERT(seq && dentry);
d7e09d03
PT
2349 sbi = ll_s2sbi(dentry->d_sb);
2350
2351 if (sbi->ll_flags & LL_SBI_NOLCK)
2352 seq_puts(seq, ",nolock");
2353
2354 if (sbi->ll_flags & LL_SBI_FLOCK)
2355 seq_puts(seq, ",flock");
2356
2357 if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
2358 seq_puts(seq, ",localflock");
2359
2360 if (sbi->ll_flags & LL_SBI_USER_XATTR)
2361 seq_puts(seq, ",user_xattr");
2362
2363 if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
2364 seq_puts(seq, ",lazystatfs");
2365
2366 if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
2367 seq_puts(seq, ",user_fid2path");
2368
bfb9944c
WW
2369 if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
2370 seq_puts(seq, ",always_ping");
2371
0a3bdb00 2372 return 0;
d7e09d03
PT
2373}
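/*
 * Example of the resulting mount-option string: a client mounted with
 * flock, user_xattr and lazystatfs enabled would have
 * ",flock,user_xattr,lazystatfs" appended to the generic options.
 */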
2374
2375/**
 2376 * Get the obd name for the given \a cmd and copy it out to user space
2377 */
2378int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
2379{
2380 struct ll_sb_info *sbi = ll_i2sbi(inode);
2381 struct obd_device *obd;
d7e09d03
PT
2382
2383 if (cmd == OBD_IOC_GETDTNAME)
2384 obd = class_exp2obd(sbi->ll_dt_exp);
2385 else if (cmd == OBD_IOC_GETMDNAME)
2386 obd = class_exp2obd(sbi->ll_md_exp);
2387 else
0a3bdb00 2388 return -EINVAL;
d7e09d03
PT
2389
2390 if (!obd)
0a3bdb00 2391 return -ENOENT;
d7e09d03 2392
7ac5db21
OD
2393 if (copy_to_user((void __user *)arg, obd->obd_name,
2394 strlen(obd->obd_name) + 1))
0a3bdb00 2395 return -EFAULT;
d7e09d03 2396
0a3bdb00 2397 return 0;
d7e09d03
PT
2398}
2399
2400/**
 2401 * Get the Lustre file system name for \a sb. If \a buf is provided (non-NULL),
 2402 * the fsname is returned in that buffer; otherwise, a static buffer is used to
 2403 * store the fsname and is returned to the caller.
2404 */
2405char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2406{
2407 static char fsname_static[MTI_NAME_MAXLEN];
2408 struct lustre_sb_info *lsi = s2lsi(sb);
2409 char *ptr;
2410 int len;
2411
6e16818b 2412 if (!buf) {
d7e09d03
PT
 2413		/* this means the caller wants to use the static buffer
 2414		 * and does not care about races. Usually this is
c0894c6c
OD
 2415		 * in the error reporting path
2416 */
d7e09d03
PT
2417 buf = fsname_static;
2418 buflen = sizeof(fsname_static);
2419 }
2420
2421 len = strlen(lsi->lsi_lmd->lmd_profile);
2422 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
2423 if (ptr && (strcmp(ptr, "-client") == 0))
2424 len -= 7;
2425
2426 if (unlikely(len >= buflen))
2427 len = buflen - 1;
2428 strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
2429 buf[len] = '\0';
2430
2431 return buf;
2432}
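/*
 * Example: for a profile of "lustre-client" this returns "lustre";
 * the trailing "-client" (7 characters) is stripped, and the result is
 * truncated to buflen - 1 characters if the caller's buffer is too
 * small.
 */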
2433
d7e09d03
PT
2434void ll_dirty_page_discard_warn(struct page *page, int ioret)
2435{
2436 char *buf, *path = NULL;
2437 struct dentry *dentry = NULL;
8c7b0e1a 2438 struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
d7e09d03
PT
2439
2440 /* this can be called inside spin lock so use GFP_ATOMIC. */
2441 buf = (char *)__get_free_page(GFP_ATOMIC);
6e16818b 2442 if (buf) {
d7e09d03 2443 dentry = d_find_alias(page->mapping->host);
6e16818b 2444 if (dentry)
1ad581eb 2445 path = dentry_path_raw(dentry, buf, PAGE_SIZE);
d7e09d03
PT
2446 }
2447
73b89907 2448 CDEBUG(D_WARNING,
2d00bd17
JP
2449 "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
2450 ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
73b89907 2451 s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
8c7b0e1a 2452 PFID(&obj->vob_header.coh_lu.loh_fid),
73b89907 2453 (path && !IS_ERR(path)) ? path : "", ioret);
d7e09d03 2454
6e16818b 2455 if (dentry)
d7e09d03
PT
2456 dput(dentry);
2457
6e16818b 2458 if (buf)
d7e09d03
PT
2459 free_page((unsigned long)buf);
2460}
c948390f 2461
dbf789ce
JX
2462ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
2463 struct lov_user_md **kbuf)
2464{
2465 struct lov_user_md lum;
2466 ssize_t lum_size;
2467
2468 if (copy_from_user(&lum, md, sizeof(lum))) {
2469 lum_size = -EFAULT;
2470 goto no_kbuf;
2471 }
2472
2473 lum_size = ll_lov_user_md_size(&lum);
2474 if (lum_size < 0)
2475 goto no_kbuf;
2476
2477 *kbuf = kzalloc(lum_size, GFP_NOFS);
2478 if (!*kbuf) {
2479 lum_size = -ENOMEM;
2480 goto no_kbuf;
2481 }
2482
2483 if (copy_from_user(*kbuf, md, lum_size) != 0) {
2484 kfree(*kbuf);
2485 *kbuf = NULL;
2486 lum_size = -EFAULT;
2487 }
2488no_kbuf:
2489 return lum_size;
2490}
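/*
 * Usage sketch for ll_copy_user_md() above (hypothetical caller, error
 * handling trimmed): on success the returned size is positive and
 * *kbuf must be freed by the caller.
 *
 *	struct lov_user_md *klum = NULL;
 *	ssize_t size = ll_copy_user_md(ulum, &klum);
 *
 *	if (size < 0)
 *		return size;
 *	... use klum for size bytes ...
 *	kfree(klum);
 */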
2491
c948390f
GP
2492/*
2493 * Compute llite root squash state after a change of root squash
2494 * configuration setting or add/remove of a lnet nid
2495 */
2496void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
2497{
2498 struct root_squash_info *squash = &sbi->ll_squash;
2499 lnet_process_id_t id;
2500 bool matched;
2501 int i;
2502
2503 /* Update norootsquash flag */
2504 down_write(&squash->rsi_sem);
2505 if (list_empty(&squash->rsi_nosquash_nids)) {
2506 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2507 } else {
2508 /*
 2509		 * Do not apply root squash if any one of our NIDs is
 2510		 * in the nosquash_nids list
2511 */
2512 matched = false;
2513 i = 0;
2514
2515 while (LNetGetId(i++, &id) != -ENOENT) {
2516 if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
2517 continue;
2518 if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
2519 matched = true;
2520 break;
2521 }
2522 }
2523 if (matched)
2524 sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
2525 else
2526 sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
2527 }
2528 up_write(&squash->rsi_sem);
2529}
a6d879fd
HD
2530
2531/**
2532 * Parse linkea content to extract information about a given hardlink
2533 *
2534 * \param[in] ldata - Initialized linkea data
2535 * \param[in] linkno - Link identifier
2536 * \param[out] parent_fid - The entry's parent FID
 2537 * \param[out] ln		- Entry name destination buffer
2538 *
2539 * \retval 0 on success
2540 * \retval Appropriate negative error code on failure
2541 */
2542static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
2543 struct lu_fid *parent_fid, struct lu_name *ln)
2544{
2545 unsigned int idx;
2546 int rc;
2547
2548 rc = linkea_init(ldata);
2549 if (rc < 0)
2550 return rc;
2551
2552 if (linkno >= ldata->ld_leh->leh_reccount)
2553 /* beyond last link */
2554 return -ENODATA;
2555
2556 linkea_first_entry(ldata);
2557 for (idx = 0; ldata->ld_lee; idx++) {
2558 linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
2559 parent_fid);
2560 if (idx == linkno)
2561 break;
2562
2563 linkea_next_entry(ldata);
2564 }
2565
2566 if (idx < linkno)
2567 return -ENODATA;
2568
2569 return 0;
2570}
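/*
 * Iteration sketch: to walk every hardlink of an entry, a caller keeps
 * calling ll_linkea_decode() with linkno = 0, 1, 2, ... and stops when
 * -ENODATA is returned (linkno beyond the last link); ll_getparent()
 * below performs a single such lookup on behalf of user space.
 */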
2571
2572/**
2573 * Get parent FID and name of an identified link. Operation is performed for
2574 * a given link number, letting the caller iterate over linkno to list one or
2575 * all links of an entry.
2576 *
2577 * \param[in] file - File descriptor against which to perform the operation
2578 * \param[in,out] arg - User-filled structure containing the linkno to operate
2579 * on and the available size. It is eventually filled with
2580 * the requested information or left untouched on error
2581 *
2582 * \retval - 0 on success
2583 * \retval - Appropriate negative error code on failure
2584 */
2585int ll_getparent(struct file *file, struct getparent __user *arg)
2586{
2587 struct inode *inode = file_inode(file);
2588 struct linkea_data *ldata;
2589 struct lu_fid parent_fid;
2590 struct lu_buf buf = {
2591 .lb_buf = NULL,
2592 .lb_len = 0
2593 };
2594 struct lu_name ln;
2595 u32 name_size;
2596 u32 linkno;
2597 int rc;
2598
2599 if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
2600 !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
2601 return -EPERM;
2602
2603 if (get_user(name_size, &arg->gp_name_size))
2604 return -EFAULT;
2605
2606 if (get_user(linkno, &arg->gp_linkno))
2607 return -EFAULT;
2608
2609 if (name_size > PATH_MAX)
2610 return -EINVAL;
2611
2612 ldata = kzalloc(sizeof(*ldata), GFP_NOFS);
2613 if (!ldata)
2614 return -ENOMEM;
2615
2616 rc = linkea_data_new(ldata, &buf);
2617 if (rc < 0)
2618 goto ldata_free;
2619
2620 rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
2621 buf.lb_len, OBD_MD_FLXATTR);
2622 if (rc < 0)
2623 goto lb_free;
2624
2625 rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
2626 if (rc < 0)
2627 goto lb_free;
2628
2629 if (ln.ln_namelen >= name_size) {
2630 rc = -EOVERFLOW;
2631 goto lb_free;
2632 }
2633
2634 if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid))) {
2635 rc = -EFAULT;
2636 goto lb_free;
2637 }
2638
2639 if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen)) {
2640 rc = -EFAULT;
2641 goto lb_free;
2642 }
2643
2644 if (put_user('\0', arg->gp_name + ln.ln_namelen)) {
2645 rc = -EFAULT;
2646 goto lb_free;
2647 }
2648
2649lb_free:
2650 lu_buf_free(&buf);
2651ldata_free:
2652 kfree(ldata);
2653 return rc;
2654}