staging: lustre: obdclass: remove structure holes to reduce memory
[linux-block.git] / drivers/staging/lustre/lustre/llite/llite_lib.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/llite_lib.c
 *
 * Lustre Light Super operations
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/types.h>
#include <linux/mm.h>

#include "../include/lustre/lustre_ioctl.h"
#include "../include/lustre_ha.h"
#include "../include/lustre_dlm.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_disk.h"
#include "../include/lustre_param.h"
#include "../include/lustre_log.h"
#include "../include/cl_object.h"
#include "../include/obd_cksum.h"
#include "llite_internal.h"

struct kmem_cache *ll_file_data_slab;
struct dentry *llite_root;
struct kset *llite_kset;

#ifndef log2
#define log2(n) ffz(~(n))
#endif

static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = NULL;
	unsigned long pages;
	unsigned long lru_page_max;
	struct sysinfo si;
	class_uuid_t uuid;
	int i;

	sbi = kzalloc(sizeof(*sbi), GFP_NOFS);
	if (!sbi)
		return NULL;

	spin_lock_init(&sbi->ll_lock);
	mutex_init(&sbi->ll_lco.lco_lock);
	spin_lock_init(&sbi->ll_pp_extent_lock);
	spin_lock_init(&sbi->ll_process_lock);
	sbi->ll_rw_stats_on = 0;

	si_meminfo(&si);
	pages = si.totalram - si.totalhigh;
	lru_page_max = pages / 2;

	sbi->ll_cache = cl_cache_init(lru_page_max);
	if (!sbi->ll_cache) {
		kfree(sbi);
		return NULL;
	}

	sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
						    SBI_DEFAULT_READAHEAD_MAX);
	sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
	sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
					SBI_DEFAULT_READAHEAD_WHOLE_MAX;

	ll_generate_random_uuid(uuid);
	class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
	CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);

	sbi->ll_flags |= LL_SBI_VERBOSE;
	sbi->ll_flags |= LL_SBI_CHECKSUM;

	sbi->ll_flags |= LL_SBI_LRU_RESIZE;

	for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_r_hist.oh_lock);
		spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
			       pp_w_hist.oh_lock);
	}

	/* metadata statahead is enabled by default */
	sbi->ll_sa_max = LL_SA_RPC_DEF;
	atomic_set(&sbi->ll_sa_total, 0);
	atomic_set(&sbi->ll_sa_wrong, 0);
	atomic_set(&sbi->ll_sa_running, 0);
	atomic_set(&sbi->ll_agl_total, 0);
	sbi->ll_flags |= LL_SBI_AGL_ENABLED;

	/* root squash */
	sbi->ll_squash.rsi_uid = 0;
	sbi->ll_squash.rsi_gid = 0;
	INIT_LIST_HEAD(&sbi->ll_squash.rsi_nosquash_nids);
	init_rwsem(&sbi->ll_squash.rsi_sem);

	sbi->ll_sb = sb;

	return sbi;
}
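
/*
 * Worked example of the readahead sizing above (illustrative, assuming
 * 4 KiB pages and SBI_DEFAULT_READAHEAD_MAX large enough not to be the
 * limiting factor): on a node with 16 GiB of low memory, pages is roughly
 * 4 M, so ra_max_pages_per_file = pages / 32 is about 128 K pages
 * (~512 MiB of per-file readahead), while lru_page_max, the cl_cache LRU
 * budget, is pages / 2, i.e. half of low memory.
 */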

static void ll_free_sbi(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	if (sbi->ll_cache) {
		if (!list_empty(&sbi->ll_squash.rsi_nosquash_nids))
			cfs_free_nidlist(&sbi->ll_squash.rsi_nosquash_nids);
		cl_cache_decref(sbi->ll_cache);
		sbi->ll_cache = NULL;
	}

	kfree(sbi);
}

static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
				    struct vfsmount *mnt)
{
	struct inode *root = NULL;
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_statfs *osfs = NULL;
	struct ptlrpc_request *request = NULL;
	struct obd_connect_data *data = NULL;
	struct obd_uuid *uuid;
	struct md_op_data *op_data;
	struct lustre_md lmd;
	u64 valid;
	int size, err, checksum;

	obd = class_name2obd(md);
	if (!obd) {
		CERROR("MD %s: not setup or attached\n", md);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;

	osfs = kzalloc(sizeof(*osfs), GFP_NOFS);
	if (!osfs) {
		kfree(data);
		return -ENOMEM;
	}

	/* indicate the features supported by this client */
	data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
				  OBD_CONNECT_ATTRFID |
				  OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK |
				  OBD_CONNECT_PINGLESS |
				  OBD_CONNECT_MAX_EASIZE |
				  OBD_CONNECT_FLOCK_DEAD |
				  OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
				  OBD_CONNECT_OPEN_BY_FID |
				  OBD_CONNECT_DIR_STRIPE;

	if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
		data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
#ifdef CONFIG_FS_POSIX_ACL
	data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
#endif

	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
		/* flag mdc connection as lightweight, only used for test
		 * purpose, use with care
		 */
		data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;

	data->ocd_ibits_known = MDS_INODELOCK_FULL;
	data->ocd_version = LUSTRE_VERSION_CODE;

	if (sb->s_flags & MS_RDONLY)
		data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
	if (sbi->ll_flags & LL_SBI_USER_XATTR)
		data->ocd_connect_flags |= OBD_CONNECT_XATTR;

	if (sbi->ll_flags & LL_SBI_FLOCK)
		sbi->ll_fop = &ll_file_operations_flock;
	else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
		sbi->ll_fop = &ll_file_operations;
	else
		sbi->ll_fop = &ll_file_operations_noflock;

	/* real client */
	data->ocd_connect_flags |= OBD_CONNECT_REAL;

	data->ocd_brw_size = MD_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
			  data, NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   md);
		goto out;
	} else if (err) {
		CERROR("cannot connect to %s: rc = %d\n", md, err);
		goto out;
	}

	sbi->ll_md_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init metadata layer FID infrastructure, rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md;
	}

	/* For mount, we only need fs info from MDT0, and also in DNE, it
	 * can make sure the client can be mounted as long as MDT0 is
	 * available
	 */
	err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
			 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
			 OBD_STATFS_FOR_MDT0);
	if (err)
		goto out_md_fid;

	/* This needs to be after statfs to ensure connect has finished.
	 * Note that "data" does NOT contain the valid connect reply.
	 * If connecting to a 1.8 server there will be no LMV device, so
	 * we can access the MDC export directly and exp_connect_flags will
	 * be non-zero, but if accessing an upgraded 2.1 server it will
	 * have the correct flags filled in.
	 * XXX: fill in the LMV exp_connect_flags from MDC(s).
	 */
	valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
	if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
	    valid != CLIENT_CONNECT_MDT_REQD) {
		char *buf;

		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out_md_fid;
		}
		obd_connect_flags2str(buf, PAGE_SIZE,
				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
		LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
				   sbi->ll_md_exp->exp_obd->obd_name, buf);
		kfree(buf);
		err = -EPROTO;
		goto out_md_fid;
	}

	size = sizeof(*data);
	err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
			   KEY_CONN_DATA, &size, data);
	if (err) {
		CERROR("%s: Get connect data failed: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_md_fid;
	}

	LASSERT(osfs->os_bsize);
	sb->s_blocksize = osfs->os_bsize;
	sb->s_blocksize_bits = log2(osfs->os_bsize);
	sb->s_magic = LL_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sbi->ll_namelen = osfs->os_namelen;

	if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
	    !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
		LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
		sbi->ll_flags &= ~LL_SBI_USER_XATTR;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
		sb->s_flags |= MS_POSIXACL;
		sbi->ll_flags |= LL_SBI_ACL;
	} else {
		LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
		sb->s_flags &= ~MS_POSIXACL;
		sbi->ll_flags &= ~LL_SBI_ACL;
	}

	if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
		sbi->ll_flags |= LL_SBI_64BIT_HASH;

	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
		sbi->ll_md_brw_pages = data->ocd_brw_size >> PAGE_SHIFT;
	else
		sbi->ll_md_brw_pages = 1;

	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
		sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;

	if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
		if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
			LCONSOLE_INFO("%s: disabling xattr cache due to unknown maximum xattr size.\n",
				      dt);
		} else {
			sbi->ll_flags |= LL_SBI_XATTR_CACHE;
			sbi->ll_xattr_cache_enabled = 1;
		}
	}

	obd = class_name2obd(dt);
	if (!obd) {
		CERROR("DT %s: not setup or attached\n", dt);
		err = -ENODEV;
		goto out_md_fid;
	}

	data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
				  OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
				  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
				  OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK |
				  OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
				  OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
				  OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
				  OBD_CONNECT_EINPROGRESS |
				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
				  OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;

	if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
		/* OBD_CONNECT_CKSUM should always be set, even if checksums are
		 * disabled by default, because it can still be enabled on the
		 * fly via /sys. As a consequence, we still need to come to an
		 * agreement on the supported algorithms at connect time
		 */
		data->ocd_connect_flags |= OBD_CONNECT_CKSUM;

		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
			data->ocd_cksum_types = OBD_CKSUM_ADLER;
		else
			data->ocd_cksum_types = cksum_types_supported_client();
	}

	data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;

	CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
	       data->ocd_connect_flags,
	       data->ocd_version, data->ocd_grant);

	obd->obd_upcall.onu_owner = &sbi->ll_lco;
	obd->obd_upcall.onu_upcall = cl_ocd_update;

	data->ocd_brw_size = DT_MAX_BRW_SIZE;

	err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
			  NULL);
	if (err == -EBUSY) {
		LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
				   dt);
		goto out_md;
	} else if (err) {
		CERROR("%s: Cannot connect to %s: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
		goto out_md;
	}

	sbi->ll_dt_exp->exp_connect_data = *data;

	err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
			   LUSTRE_SEQ_METADATA);
	if (err) {
		CERROR("%s: Can't init data layer FID infrastructure, rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_dt;
	}

	mutex_lock(&sbi->ll_lco.lco_lock);
	sbi->ll_lco.lco_flags = data->ocd_connect_flags;
	sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
	sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
	mutex_unlock(&sbi->ll_lco.lco_lock);

	fid_zero(&sbi->ll_root_fid);
	err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid);
	if (err) {
		CERROR("cannot mds_connect: rc = %d\n", err);
		goto out_lock_cn_cb;
	}
	if (!fid_is_sane(&sbi->ll_root_fid)) {
		CERROR("%s: Invalid root fid "DFID" during mount\n",
		       sbi->ll_md_exp->exp_obd->obd_name,
		       PFID(&sbi->ll_root_fid));
		err = -EINVAL;
		goto out_lock_cn_cb;
	}
	CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));

	sb->s_op = &lustre_super_operations;
	sb->s_xattr = ll_xattr_handlers;
#if THREAD_SIZE >= 8192 /*b=17630*/
	sb->s_export_op = &lustre_export_operations;
#endif

	/* make root inode
	 * XXX: move this to after cbd setup?
	 */
	valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
	if (sbi->ll_flags & LL_SBI_ACL)
		valid |= OBD_MD_FLACL;

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data) {
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	op_data->op_fid1 = sbi->ll_root_fid;
	op_data->op_mode = 0;
	op_data->op_valid = valid;

	err = md_getattr(sbi->ll_md_exp, op_data, &request);
	kfree(op_data);
	if (err) {
		CERROR("%s: md_getattr failed for root: rc = %d\n",
		       sbi->ll_md_exp->exp_obd->obd_name, err);
		goto out_lock_cn_cb;
	}

	err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			       sbi->ll_md_exp, &lmd);
	if (err) {
		CERROR("failed to understand root inode md: rc = %d\n", err);
		ptlrpc_req_finished(request);
		goto out_lock_cn_cb;
	}

	LASSERT(fid_is_sane(&sbi->ll_root_fid));
	root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
					    sbi->ll_flags & LL_SBI_32BIT_API),
		       &lmd);
	md_free_lustre_md(sbi->ll_md_exp, &lmd);
	ptlrpc_req_finished(request);

	if (IS_ERR(root)) {
#ifdef CONFIG_FS_POSIX_ACL
		if (lmd.posix_acl) {
			posix_acl_release(lmd.posix_acl);
			lmd.posix_acl = NULL;
		}
#endif
		err = -EBADF;
		CERROR("lustre_lite: bad iget4 for root\n");
		goto out_root;
	}

	checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
				 KEY_CHECKSUM, sizeof(checksum), &checksum,
				 NULL);
	if (err) {
		CERROR("%s: Set checksum failed: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_root;
	}
	cl_sb_init(sb);

	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
				 KEY_CACHE_SET, sizeof(*sbi->ll_cache),
				 sbi->ll_cache, NULL);
	if (err) {
		CERROR("%s: Set cache_set failed: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, err);
		goto out_root;
	}

	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		CERROR("%s: can't make root dentry\n",
		       ll_get_fsname(sb, NULL, 0));
		err = -ENOMEM;
		goto out_lock_cn_cb;
	}

	sbi->ll_sdev_orig = sb->s_dev;

	/* We set sb->s_dev equal on all lustre clients in order to support
	 * NFS export clustering. NFSD requires that the FSID be the same
	 * on all clients.
	 */
	/* s_dev is also used in lt_compare() to compare two fs, but that is
	 * only a node-local comparison.
	 */
	uuid = obd_get_uuid(sbi->ll_md_exp);
	if (uuid) {
		sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
		get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
	}

	kfree(data);
	kfree(osfs);

	if (llite_root) {
		err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
		if (err < 0) {
			CERROR("%s: could not register mount in debugfs: rc = %d\n",
			       ll_get_fsname(sb, NULL, 0), err);
			err = 0;
		}
	}

	return err;
out_root:
	iput(root);
out_lock_cn_cb:
	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
out_dt:
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;
out_md_fid:
	obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
out:
	kfree(data);
	kfree(osfs);
	return err;
}
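
/*
 * Summary of the error unwinding above (descriptive only, no new behaviour):
 * the labels run in reverse order of setup, so a failure after the DT
 * connect falls through out_lock_cn_cb -> out_dt -> out_md_fid -> out_md,
 * tearing down the data-layer FID client and export before the metadata
 * ones, while "out" only frees the scratch connect/statfs buffers.
 */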

int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	size = sizeof(*lmmsize);
	rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
			  KEY_MAX_EASIZE, &size, lmmsize);
	if (rc) {
		CERROR("%s: cannot get max LOV EA size: rc = %d\n",
		       sbi->ll_dt_exp->exp_obd->obd_name, rc);
		return rc;
	}

	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
			  KEY_MAX_EASIZE, &size, lmmsize);
	if (rc)
		CERROR("Get max mdsize error rc %d\n", rc);

	return rc;
}

/**
 * Get the value of the default_easize parameter.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] sbi superblock info for this filesystem
 * \param[out] lmmsize pointer to storage location for value
 *
 * \retval 0 on success
 * \retval negative negated errno on failure
 */
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
{
	int size, rc;

	size = sizeof(int);
	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
			  KEY_DEFAULT_EASIZE, &size, lmmsize);
	if (rc)
		CERROR("Get default mdsize error rc %d\n", rc);

	return rc;
}

/**
 * Set the default_easize parameter to the given value.
 *
 * \see client_obd::cl_default_mds_easize
 *
 * \param[in] sbi superblock info for this filesystem
 * \param[in] lmmsize the size to set
 *
 * \retval 0 on success
 * \retval negative negated errno on failure
 */
int ll_set_default_mdsize(struct ll_sb_info *sbi, int lmmsize)
{
	if (lmmsize < sizeof(struct lov_mds_md) ||
	    lmmsize > OBD_MAX_DEFAULT_EA_SIZE)
		return -EINVAL;

	return obd_set_info_async(NULL, sbi->ll_md_exp,
				  sizeof(KEY_DEFAULT_EASIZE),
				  KEY_DEFAULT_EASIZE,
				  sizeof(int), &lmmsize, NULL);
}
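
/*
 * Illustrative (hypothetical) use of the two helpers above, e.g. from a
 * handler that wants to raise the cached default EA size; "new_size" is an
 * invented value, not something defined in this file:
 *
 *	int cur, rc;
 *
 *	rc = ll_get_default_mdsize(sbi, &cur);
 *	if (!rc && cur < new_size)
 *		rc = ll_set_default_mdsize(sbi, new_size);
 *
 * ll_set_default_mdsize() rejects values smaller than a bare lov_mds_md or
 * larger than OBD_MAX_DEFAULT_EA_SIZE with -EINVAL.
 */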

static void client_common_put_super(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);

	cl_sb_fini(sb);

	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
	obd_disconnect(sbi->ll_dt_exp);
	sbi->ll_dt_exp = NULL;

	ldebugfs_unregister_mountpoint(sbi);

	obd_fid_fini(sbi->ll_md_exp->exp_obd);
	obd_disconnect(sbi->ll_md_exp);
	sbi->ll_md_exp = NULL;
}

void ll_kill_super(struct super_block *sb)
{
	struct ll_sb_info *sbi;

	/* not an initialized superblock? */
	if (!(sb->s_flags & MS_ACTIVE))
		return;

	sbi = ll_s2sbi(sb);
	/* Restore s_dev (changed for clustered NFS) before put_super, since
	 * newer kernels cache s_dev and changing sb->s_dev in put_super does
	 * not affect the real device removal.
	 */
	if (sbi) {
		sb->s_dev = sbi->ll_sdev_orig;
		sbi->ll_umounting = 1;

		/* wait for running statahead threads to quit */
		while (atomic_read(&sbi->ll_sa_running) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC >> 3));
		}
	}
}

static inline int ll_set_opt(const char *opt, char *data, int fl)
{
	if (strncmp(opt, data, strlen(opt)) != 0)
		return 0;
	else
		return fl;
}

/* non-client-specific mount options are parsed in lmd_parse */
static int ll_options(char *options, int *flags)
{
	int tmp;
	char *s1 = options, *s2;

	if (!options)
		return 0;

	CDEBUG(D_CONFIG, "Parsing opts %s\n", options);

	while (*s1) {
		CDEBUG(D_SUPER, "next opt=%s\n", s1);
		tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noflock", s1,
				 LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("context", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("fscontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("defcontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("rootcontext", s1, 1);
		if (tmp)
			goto next;
		tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}

		tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags |= tmp;
			goto next;
		}
		tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
		if (tmp) {
			*flags &= ~tmp;
			goto next;
		}
		LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
				   s1);
		return -EINVAL;

next:
		/* Find next opt */
		s2 = strchr(s1, ',');
		if (!s2)
			break;
		s1 = s2 + 1;
	}
	return 0;
}
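
/*
 * Example of the parsing above (illustrative): mounting with
 * "-o flock,nouser_xattr,noverbose" walks the option string comma by comma,
 * sets LL_SBI_FLOCK and clears LL_SBI_USER_XATTR and LL_SBI_VERBOSE in
 * *flags, and silently skips the "context"/"fscontext"/"defcontext"/
 * "rootcontext" (SELinux) options; any string not matched by an
 * ll_set_opt() probe makes the mount fail with -EINVAL.
 */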

void ll_lli_init(struct ll_inode_info *lli)
{
	lli->lli_inode_magic = LLI_INODE_MAGIC;
	lli->lli_flags = 0;
	spin_lock_init(&lli->lli_lock);
	lli->lli_posix_acl = NULL;
	/* Do not set lli_fid, it has been initialized already. */
	fid_zero(&lli->lli_pfid);
	lli->lli_mds_read_och = NULL;
	lli->lli_mds_write_och = NULL;
	lli->lli_mds_exec_och = NULL;
	lli->lli_open_fd_read_count = 0;
	lli->lli_open_fd_write_count = 0;
	lli->lli_open_fd_exec_count = 0;
	mutex_init(&lli->lli_och_mutex);
	spin_lock_init(&lli->lli_agl_lock);
	spin_lock_init(&lli->lli_layout_lock);
	ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
	lli->lli_clob = NULL;

	init_rwsem(&lli->lli_xattrs_list_rwsem);
	mutex_init(&lli->lli_xattrs_enq_lock);

	LASSERT(lli->lli_vfs_inode.i_mode != 0);
	if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
		mutex_init(&lli->lli_readdir_mutex);
		lli->lli_opendir_key = NULL;
		lli->lli_sai = NULL;
		spin_lock_init(&lli->lli_sa_lock);
		lli->lli_opendir_pid = 0;
		lli->lli_sa_enabled = 0;
		lli->lli_def_stripe_offset = -1;
	} else {
		mutex_init(&lli->lli_size_mutex);
		lli->lli_symlink_name = NULL;
		init_rwsem(&lli->lli_trunc_sem);
		range_lock_tree_init(&lli->lli_write_tree);
		init_rwsem(&lli->lli_glimpse_sem);
		lli->lli_glimpse_time = 0;
		INIT_LIST_HEAD(&lli->lli_agl_list);
		lli->lli_agl_index = 0;
		lli->lli_async_rc = 0;
	}
	mutex_init(&lli->lli_layout_mutex);
}

static inline int ll_bdi_register(struct backing_dev_info *bdi)
{
	static atomic_t ll_bdi_num = ATOMIC_INIT(0);

	bdi->name = "lustre";
	return bdi_register(bdi, NULL, "lustre-%d",
			    atomic_inc_return(&ll_bdi_num));
}

int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
{
	struct lustre_profile *lprof = NULL;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi;
	char *dt = NULL, *md = NULL;
	char *profilenm = get_profile_name(sb);
	struct config_llog_instance *cfg;
	int err;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);

	cfg = kzalloc(sizeof(*cfg), GFP_NOFS);
	if (!cfg)
		return -ENOMEM;

	try_module_get(THIS_MODULE);

	/* client additional sb info */
	sbi = ll_init_sbi(sb);
	lsi->lsi_llsbi = sbi;
	if (!sbi) {
		module_put(THIS_MODULE);
		kfree(cfg);
		return -ENOMEM;
	}

	err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
	if (err)
		goto out_free;

	err = bdi_init(&lsi->lsi_bdi);
	if (err)
		goto out_free;
	lsi->lsi_flags |= LSI_BDI_INITIALIZED;
	lsi->lsi_bdi.capabilities = 0;
	err = ll_bdi_register(&lsi->lsi_bdi);
	if (err)
		goto out_free;

	sb->s_bdi = &lsi->lsi_bdi;
	/* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
	sb->s_d_op = &ll_d_ops;

	/* Generate a string unique to this super, in case some joker tries
	 * to mount the same fs at two mount points.
	 * Use the address of the super itself.
	 */
	cfg->cfg_instance = sb;
	cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
	cfg->cfg_callback = class_config_llog_handler;
	/* set up client obds */
	err = lustre_process_log(sb, profilenm, cfg);
	if (err < 0)
		goto out_free;

	/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
	lprof = class_get_profile(profilenm);
	if (!lprof) {
		LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
				   profilenm);
		err = -EINVAL;
		goto out_free;
	}
	CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
	       lprof->lp_md, lprof->lp_dt);

	dt = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
	if (!dt) {
		err = -ENOMEM;
		goto out_free;
	}

	md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance);
	if (!md) {
		err = -ENOMEM;
		goto out_free;
	}

	/* connections, registrations, sb setup */
	err = client_common_fill_super(sb, md, dt, mnt);
	if (!err)
		sbi->ll_client_common_fill_super_succeeded = 1;

out_free:
	kfree(md);
	kfree(dt);
	if (lprof)
		class_put_profile(lprof);
	if (err)
		ll_put_super(sb);
	else if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Mounted %s\n", profilenm);

	kfree(cfg);
	return err;
} /* ll_fill_super */

void ll_put_super(struct super_block *sb)
{
	struct config_llog_instance cfg, params_cfg;
	struct obd_device *obd;
	struct lustre_sb_info *lsi = s2lsi(sb);
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	int next, force = 1, rc = 0;
	long ccc_count;

	CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);

	cfg.cfg_instance = sb;
	lustre_end_log(sb, profilenm, &cfg);

	params_cfg.cfg_instance = sb;
	lustre_end_log(sb, PARAMS_FILENAME, &params_cfg);

	if (sbi->ll_md_exp) {
		obd = class_exp2obd(sbi->ll_md_exp);
		if (obd)
			force = obd->obd_force;
	}

	/* Wait for unstable pages to be committed to stable storage */
	if (!force) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

		rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
				  !atomic_long_read(&sbi->ll_cache->ccc_unstable_nr),
				  &lwi);
	}

	ccc_count = atomic_long_read(&sbi->ll_cache->ccc_unstable_nr);
	if (!force && rc != -EINTR)
		LASSERTF(!ccc_count, "count: %li\n", ccc_count);

	/* We need to set force before the lov_disconnect in
	 * lustre_common_put_super, since l_d cleans up osc's as well.
	 */
	if (force) {
		next = 0;
		while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
						     &next)) != NULL) {
			obd->obd_force = force;
		}
	}

	if (sbi->ll_client_common_fill_super_succeeded) {
		/* Only if client_common_fill_super succeeded */
		client_common_put_super(sb);
	}

	next = 0;
	while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)))
		class_manual_cleanup(obd);

	if (sbi->ll_flags & LL_SBI_VERBOSE)
		LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");

	if (profilenm)
		class_del_profile(profilenm);

	if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
		bdi_destroy(&lsi->lsi_bdi);
		lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
	}

	ll_free_sbi(sb);
	lsi->lsi_llsbi = NULL;

	lustre_common_put_super(sb);

	cl_env_cache_purge(~0);

	module_put(THIS_MODULE);
} /* client_put_super */

struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
{
	struct inode *inode = NULL;

	/* NOTE: we depend on atomic igrab() -bzzz */
	lock_res_and_lock(lock);
	if (lock->l_resource->lr_lvb_inode) {
		struct ll_inode_info *lli;

		lli = ll_i2info(lock->l_resource->lr_lvb_inode);
		if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
			inode = igrab(lock->l_resource->lr_lvb_inode);
		} else {
			inode = lock->l_resource->lr_lvb_inode;
			LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
					 D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
					 lock->l_resource->lr_lvb_inode,
					 lli->lli_inode_magic);
			inode = NULL;
		}
	}
	unlock_res_and_lock(lock);
	return inode;
}

void ll_dir_clear_lsm_md(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);

	LASSERT(S_ISDIR(inode->i_mode));

	if (lli->lli_lsm_md) {
		lmv_free_memmd(lli->lli_lsm_md);
		lli->lli_lsm_md = NULL;
	}
}

static struct inode *ll_iget_anon_dir(struct super_block *sb,
				      const struct lu_fid *fid,
				      struct lustre_md *md)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct mdt_body *body = md->body;
	struct inode *inode;
	ino_t ino;

	ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
	inode = iget_locked(sb, ino);
	if (!inode) {
		CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
		       ll_get_fsname(sb, NULL, 0), PFID(fid));
		return ERR_PTR(-ENOENT);
	}

	if (inode->i_state & I_NEW) {
		struct ll_inode_info *lli = ll_i2info(inode);
		struct lmv_stripe_md *lsm = md->lmv;

		inode->i_mode = (inode->i_mode & ~S_IFMT) |
				(body->mbo_mode & S_IFMT);
		LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
			 PFID(fid));

		LTIME_S(inode->i_mtime) = 0;
		LTIME_S(inode->i_atime) = 0;
		LTIME_S(inode->i_ctime) = 0;
		inode->i_rdev = 0;

		inode->i_op = &ll_dir_inode_operations;
		inode->i_fop = &ll_dir_operations;
		lli->lli_fid = *fid;
		ll_lli_init(lli);

		LASSERT(lsm);
		/* master object FID */
		lli->lli_pfid = body->mbo_fid1;
		CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
		       lli, PFID(fid), PFID(&lli->lli_pfid));
		unlock_new_inode(inode);
	}

	return inode;
}

static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
{
	struct lmv_stripe_md *lsm = md->lmv;
	struct lu_fid *fid;
	int i;

	LASSERT(lsm);
	/*
	 * XXX sigh, this lsm_root initialization should be in
	 * LMV layer, but it needs ll_iget right now, so we
	 * put this here right now.
	 */
	for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
		fid = &lsm->lsm_md_oinfo[i].lmo_fid;
		LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
		/* Unfortunately ll_iget will call ll_update_inode,
		 * where the initialization of a slave inode is slightly
		 * different, so it resets lsm_md to NULL to avoid
		 * initializing the lsm for a slave inode.
		 */
		/* For a migrating inode, the master stripe and master object
		 * are the same, so we only need to assign this inode
		 */
		if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
			lsm->lsm_md_oinfo[i].lmo_root = inode;
		else
			lsm->lsm_md_oinfo[i].lmo_root =
				ll_iget_anon_dir(inode->i_sb, fid, md);
		if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
			int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);

			lsm->lsm_md_oinfo[i].lmo_root = NULL;
			return rc;
		}
	}

	return 0;
}

static inline int lli_lsm_md_eq(const struct lmv_stripe_md *lsm_md1,
				const struct lmv_stripe_md *lsm_md2)
{
	return lsm_md1->lsm_md_magic == lsm_md2->lsm_md_magic &&
	       lsm_md1->lsm_md_stripe_count == lsm_md2->lsm_md_stripe_count &&
	       lsm_md1->lsm_md_master_mdt_index ==
			lsm_md2->lsm_md_master_mdt_index &&
	       lsm_md1->lsm_md_hash_type == lsm_md2->lsm_md_hash_type &&
	       lsm_md1->lsm_md_layout_version ==
			lsm_md2->lsm_md_layout_version &&
	       !strcmp(lsm_md1->lsm_md_pool_name,
		       lsm_md2->lsm_md_pool_name);
}

static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lmv_stripe_md *lsm = md->lmv;
	int rc;

	LASSERT(S_ISDIR(inode->i_mode));
	CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
	       PFID(ll_inode2fid(inode)));

	/* no striped information from request. */
	if (!lsm) {
		if (!lli->lli_lsm_md) {
			return 0;
		} else if (lli->lli_lsm_md->lsm_md_hash_type &
			   LMV_HASH_FLAG_MIGRATION) {
			/*
			 * migration is done, the temporary MIGRATE layout has
			 * been removed
			 */
			CDEBUG(D_INODE, DFID" finish migration.\n",
			       PFID(ll_inode2fid(inode)));
			lmv_free_memmd(lli->lli_lsm_md);
			lli->lli_lsm_md = NULL;
			return 0;
		} else {
			/*
			 * The lustre_md from req does not include stripeEA,
			 * see ll_md_setattr
			 */
			return 0;
		}
	}

	/* set the directory layout */
	if (!lli->lli_lsm_md) {
		rc = ll_init_lsm_md(inode, md);
		if (rc)
			return rc;

		lli->lli_lsm_md = lsm;
		/*
		 * set lsm_md to NULL, so the following free lustre_md
		 * will not free this lsm
		 */
		md->lmv = NULL;
		CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
		       lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
		return 0;
	}

	/* Compare the old and new stripe information */
	if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
		struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
		int idx;

		CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
		       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
		       inode, lsm, old_lsm,
		       lsm->lsm_md_magic, old_lsm->lsm_md_magic,
		       lsm->lsm_md_stripe_count,
		       old_lsm->lsm_md_stripe_count,
		       lsm->lsm_md_master_mdt_index,
		       old_lsm->lsm_md_master_mdt_index,
		       lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
		       lsm->lsm_md_layout_version,
		       old_lsm->lsm_md_layout_version,
		       lsm->lsm_md_pool_name,
		       old_lsm->lsm_md_pool_name);

		for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
			CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
			       PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
		}

		for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
			CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
			       PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
		}

		return -EIO;
	}

	return 0;
}

void ll_clear_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
	       PFID(ll_inode2fid(inode)), inode);

	if (S_ISDIR(inode->i_mode)) {
		/* these should have been cleared in ll_file_release */
		LASSERT(!lli->lli_opendir_key);
		LASSERT(!lli->lli_sai);
		LASSERT(lli->lli_opendir_pid == 0);
	}

	md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));

	LASSERT(!lli->lli_open_fd_write_count);
	LASSERT(!lli->lli_open_fd_read_count);
	LASSERT(!lli->lli_open_fd_exec_count);

	if (lli->lli_mds_write_och)
		ll_md_real_close(inode, FMODE_WRITE);
	if (lli->lli_mds_exec_och)
		ll_md_real_close(inode, FMODE_EXEC);
	if (lli->lli_mds_read_och)
		ll_md_real_close(inode, FMODE_READ);

	if (S_ISLNK(inode->i_mode)) {
		kfree(lli->lli_symlink_name);
		lli->lli_symlink_name = NULL;
	}

	ll_xattr_cache_destroy(inode);

#ifdef CONFIG_FS_POSIX_ACL
	if (lli->lli_posix_acl) {
		posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = NULL;
	}
#endif
	lli->lli_inode_magic = LLI_INODE_DEAD;

	if (S_ISDIR(inode->i_mode))
		ll_dir_clear_lsm_md(inode);
	if (S_ISREG(inode->i_mode) && !is_bad_inode(inode))
		LASSERT(list_empty(&lli->lli_agl_list));

	/*
	 * XXX This has to be done before lsm is freed below, because
	 * cl_object still uses inode lsm.
	 */
	cl_inode_fini(inode);
}

#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)

static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
{
	struct lustre_md md;
	struct inode *inode = d_inode(dentry);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *request = NULL;
	int rc, ia_valid;

	op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
	if (rc) {
		ptlrpc_req_finished(request);
		if (rc == -ENOENT) {
			clear_nlink(inode);
			/* Unlinked special device node? Or just a race?
			 * Pretend we did everything.
			 */
			if (!S_ISREG(inode->i_mode) &&
			    !S_ISDIR(inode->i_mode)) {
				ia_valid = op_data->op_attr.ia_valid;
				op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
				rc = simple_setattr(dentry, &op_data->op_attr);
				op_data->op_attr.ia_valid = ia_valid;
			}
		} else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
			CERROR("md_setattr fails: rc = %d\n", rc);
		}
		return rc;
	}

	rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc) {
		ptlrpc_req_finished(request);
		return rc;
	}

	ia_valid = op_data->op_attr.ia_valid;
	/* inode size will be set in cl_setattr_ost; can't do it now since
	 * dirty cache is not cleared yet.
	 */
	op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
	rc = simple_setattr(dentry, &op_data->op_attr);
	op_data->op_attr.ia_valid = ia_valid;

	rc = ll_update_inode(inode, &md);
	ptlrpc_req_finished(request);

	return rc;
}

/* If this inode has objects allocated to it (lsm != NULL), then the OST
 * object(s) determine the file size and mtime. Otherwise, the MDS will
 * keep these values until such a time that objects are allocated for it.
 * We do the MDS operations first, as it is checking permissions for us.
 * We don't do the MDS RPC if there is nothing that we want to store there,
 * otherwise there is no harm in updating mtime/atime on the MDS if we are
 * going to do an RPC anyway.
 *
 * If we are doing a truncate, we will send the mtime and ctime updates
 * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
 * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
 * at the same time.
 *
 * In case of HSM import, we only set attr on MDS.
 */
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
	struct inode *inode = d_inode(dentry);
	struct ll_inode_info *lli = ll_i2info(inode);
	struct md_op_data *op_data = NULL;
	bool file_is_released = false;
	int rc = 0;

	CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
	       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
	       i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);

	if (attr->ia_valid & ATTR_SIZE) {
		/* Check new size against VFS/VM file size limit and rlimit */
		rc = inode_newsize_ok(inode, attr->ia_size);
		if (rc)
			return rc;

		/* The maximum Lustre file size is variable, based on the
		 * OST maximum object size and number of stripes. This
		 * needs another check in addition to the VFS check above.
		 */
		if (attr->ia_size > ll_file_maxbytes(inode)) {
			CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
			       PFID(&lli->lli_fid), attr->ia_size,
			       ll_file_maxbytes(inode));
			return -EFBIG;
		}

		attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
	}

	/* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */
	if (attr->ia_valid & TIMES_SET_FLAGS) {
		if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
		    !capable(CFS_CAP_FOWNER))
			return -EPERM;
	}

	/* We mark all of the fields "set" so MDS/OST does not re-set them */
	if (attr->ia_valid & ATTR_CTIME) {
		attr->ia_ctime = CURRENT_TIME;
		attr->ia_valid |= ATTR_CTIME_SET;
	}
	if (!(attr->ia_valid & ATTR_ATIME_SET) &&
	    (attr->ia_valid & ATTR_ATIME)) {
		attr->ia_atime = CURRENT_TIME;
		attr->ia_valid |= ATTR_ATIME_SET;
	}
	if (!(attr->ia_valid & ATTR_MTIME_SET) &&
	    (attr->ia_valid & ATTR_MTIME)) {
		attr->ia_mtime = CURRENT_TIME;
		attr->ia_valid |= ATTR_MTIME_SET;
	}

	if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
		CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %llu\n",
		       LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
		       (s64)ktime_get_real_seconds());

	/* We always do an MDS RPC, even if we're only changing the size;
	 * only the MDS knows whether truncate() should fail with -ETXTBUSY
	 */

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data)
		return -ENOMEM;

	if (!S_ISDIR(inode->i_mode))
		inode_unlock(inode);

	/* truncate on a released file must fail with -ENODATA,
	 * so size must not be set on MDS for a released file,
	 * but other attributes must be set
	 */
	if (S_ISREG(inode->i_mode)) {
		struct cl_layout cl = {
			.cl_is_released = false,
		};
		struct lu_env *env;
		int refcheck;
		__u32 gen;

		rc = ll_layout_refresh(inode, &gen);
		if (rc < 0)
			goto out;

		/*
		 * XXX: the only place we need to know the layout type,
		 * this will be removed by a later patch. -Jinshan
		 */
		env = cl_env_get(&refcheck);
		if (IS_ERR(env)) {
			rc = PTR_ERR(env);
			goto out;
		}

		rc = cl_object_layout_get(env, lli->lli_clob, &cl);
		cl_env_put(env, &refcheck);
		if (rc < 0)
			goto out;

		file_is_released = cl.cl_is_released;

		if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
			if (file_is_released) {
				rc = ll_layout_restore(inode, 0, attr->ia_size);
				if (rc < 0)
					goto out;

				file_is_released = false;
				ll_layout_refresh(inode, &gen);
			}

			/*
			 * If we are changing file size, file content is
			 * modified, flag it.
			 */
			attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
			op_data->op_bias |= MDS_DATA_MODIFIED;
		}
	}

	memcpy(&op_data->op_attr, attr, sizeof(*attr));

	rc = ll_md_setattr(dentry, op_data);
	if (rc)
		goto out;

	/* RPC to MDT is sent, cancel data modification flag */
	if (op_data->op_bias & MDS_DATA_MODIFIED) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags &= ~LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	if (!S_ISREG(inode->i_mode) || file_is_released) {
		rc = 0;
		goto out;
	}

	if (attr->ia_valid & (ATTR_SIZE |
			      ATTR_ATIME | ATTR_ATIME_SET |
			      ATTR_MTIME | ATTR_MTIME_SET)) {
		/* For truncate and utimes sending attributes to OSTs, setting
		 * mtime/atime to the past will be performed under PW [0:EOF]
		 * extent lock (new_size:EOF for truncate). It may seem
		 * excessive to send mtime/atime updates to OSTs when not
		 * setting times to past, but it is necessary due to possible
		 * time de-synchronization between MDT inode and OST objects
		 */
		if (attr->ia_valid & ATTR_SIZE)
			down_write(&lli->lli_trunc_sem);
		rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
		if (attr->ia_valid & ATTR_SIZE)
			up_write(&lli->lli_trunc_sem);
	}
out:
	if (op_data)
		ll_finish_md_op_data(op_data);

	if (!S_ISDIR(inode->i_mode)) {
		inode_lock(inode);
		if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
			inode_dio_wait(inode);
	}

	ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
			   LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);

	return rc;
}

int ll_setattr(struct dentry *de, struct iattr *attr)
{
	int mode = d_inode(de)->i_mode;

	if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
	    (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
		attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;

	if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
	     (ATTR_SIZE | ATTR_MODE)) &&
	    (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
	     (((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
	      !(attr->ia_mode & S_ISGID))))
		attr->ia_valid |= ATTR_FORCE;

	if ((attr->ia_valid & ATTR_MODE) &&
	    (mode & S_ISUID) &&
	    !(attr->ia_mode & S_ISUID) &&
	    !(attr->ia_valid & ATTR_KILL_SUID))
		attr->ia_valid |= ATTR_KILL_SUID;

	if ((attr->ia_valid & ATTR_MODE) &&
	    ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
	    !(attr->ia_mode & S_ISGID) &&
	    !(attr->ia_valid & ATTR_KILL_SGID))
		attr->ia_valid |= ATTR_KILL_SGID;

	return ll_setattr_raw(de, attr, false);
}
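
/*
 * Example of the flag fixups above (illustrative): a combined truncate and
 * chmod on a setuid executable arrives with ATTR_SIZE, ATTR_MODE and
 * ATTR_CTIME set and a new mode without S_ISUID; ll_setattr() then adds
 * MDS_OPEN_OWNEROVERRIDE and ATTR_FORCE and sets ATTR_KILL_SUID so the
 * setuid bit is dropped, before handing the request to ll_setattr_raw()
 * with hsm_import == false.
 */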

int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
		       __u64 max_age, __u32 flags)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_statfs obd_osfs;
	int rc;

	rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
	if (rc) {
		CERROR("md_statfs fails: rc = %d\n", rc);
		return rc;
	}

	osfs->os_type = sb->s_magic;

	CDEBUG(D_SUPER, "MDC blocks %llu/%llu objects %llu/%llu\n",
	       osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,
	       osfs->os_files);

	if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
		flags |= OBD_STATFS_NODELAY;

	rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
	if (rc) {
		CERROR("obd_statfs fails: rc = %d\n", rc);
		return rc;
	}

	CDEBUG(D_SUPER, "OSC blocks %llu/%llu objects %llu/%llu\n",
	       obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
	       obd_osfs.os_files);

	osfs->os_bsize = obd_osfs.os_bsize;
	osfs->os_blocks = obd_osfs.os_blocks;
	osfs->os_bfree = obd_osfs.os_bfree;
	osfs->os_bavail = obd_osfs.os_bavail;

	/* If we don't have as many objects free on the OST as inodes
	 * on the MDS, we reduce the total number of inodes to
	 * compensate, so that the "inodes in use" number is correct.
	 */
	if (obd_osfs.os_ffree < osfs->os_ffree) {
		osfs->os_files = (osfs->os_files - osfs->os_ffree) +
				 obd_osfs.os_ffree;
		osfs->os_ffree = obd_osfs.os_ffree;
	}

	return rc;
}
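
/*
 * Worked example of the inode-count adjustment above (illustrative numbers):
 * if the MDT reports os_files = 1000 with os_ffree = 400 (600 in use) but
 * the OSTs only have 250 free objects, the merged result becomes
 * os_files = 600 + 250 = 850 and os_ffree = 250, so "df -i" still shows
 * 600 inodes in use while the free-inode count reflects the real OST
 * object shortage.
 */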

int ll_statfs(struct dentry *de, struct kstatfs *sfs)
{
	struct super_block *sb = de->d_sb;
	struct obd_statfs osfs;
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op: at %llu jiffies\n", get_jiffies_64());
	ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);

	/* Some amount of caching on the client is allowed */
	rc = ll_statfs_internal(sb, &osfs,
				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
				0);
	if (rc)
		return rc;

	statfs_unpack(sfs, &osfs);

	/* We need to downshift for all 32-bit kernels, because we can't
	 * tell if the kernel is being called via sys_statfs64() or not.
	 * Stop before overflowing f_bsize - in which case it is better
	 * to just risk EOVERFLOW if caller is using old sys_statfs().
	 */
	if (sizeof(long) < 8) {
		while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
			sfs->f_bsize <<= 1;

			osfs.os_blocks >>= 1;
			osfs.os_bfree >>= 1;
			osfs.os_bavail >>= 1;
		}
	}

	sfs->f_blocks = osfs.os_blocks;
	sfs->f_bfree = osfs.os_bfree;
	sfs->f_bavail = osfs.os_bavail;
	sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
	return 0;
}

void ll_inode_size_lock(struct inode *inode)
{
	struct ll_inode_info *lli;

	LASSERT(!S_ISDIR(inode->i_mode));

	lli = ll_i2info(inode);
	mutex_lock(&lli->lli_size_mutex);
}

void ll_inode_size_unlock(struct inode *inode)
{
	struct ll_inode_info *lli;

	lli = ll_i2info(inode);
	mutex_unlock(&lli->lli_size_mutex);
}

int ll_update_inode(struct inode *inode, struct lustre_md *md)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body = md->body;
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	if (body->mbo_valid & OBD_MD_FLEASIZE)
		cl_file_inode_init(inode, md);

	if (S_ISDIR(inode->i_mode)) {
		int rc;

		rc = ll_update_lsm_md(inode, md);
		if (rc)
			return rc;
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (body->mbo_valid & OBD_MD_FLACL) {
		spin_lock(&lli->lli_lock);
		if (lli->lli_posix_acl)
			posix_acl_release(lli->lli_posix_acl);
		lli->lli_posix_acl = md->posix_acl;
		spin_unlock(&lli->lli_lock);
	}
#endif
	inode->i_ino = cl_fid_build_ino(&body->mbo_fid1,
					sbi->ll_flags & LL_SBI_32BIT_API);
	inode->i_generation = cl_fid_build_gen(&body->mbo_fid1);

	if (body->mbo_valid & OBD_MD_FLATIME) {
		if (body->mbo_atime > LTIME_S(inode->i_atime))
			LTIME_S(inode->i_atime) = body->mbo_atime;
		lli->lli_atime = body->mbo_atime;
	}
	if (body->mbo_valid & OBD_MD_FLMTIME) {
		if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
			CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
			       inode->i_ino, LTIME_S(inode->i_mtime),
			       body->mbo_mtime);
			LTIME_S(inode->i_mtime) = body->mbo_mtime;
		}
		lli->lli_mtime = body->mbo_mtime;
	}
	if (body->mbo_valid & OBD_MD_FLCTIME) {
		if (body->mbo_ctime > LTIME_S(inode->i_ctime))
			LTIME_S(inode->i_ctime) = body->mbo_ctime;
		lli->lli_ctime = body->mbo_ctime;
	}
	if (body->mbo_valid & OBD_MD_FLMODE)
		inode->i_mode = (inode->i_mode & S_IFMT) |
				(body->mbo_mode & ~S_IFMT);
	if (body->mbo_valid & OBD_MD_FLTYPE)
		inode->i_mode = (inode->i_mode & ~S_IFMT) |
				(body->mbo_mode & S_IFMT);
	LASSERT(inode->i_mode != 0);
	if (S_ISREG(inode->i_mode))
		inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
				       LL_MAX_BLKSIZE_BITS);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;
	if (body->mbo_valid & OBD_MD_FLUID)
		inode->i_uid = make_kuid(&init_user_ns, body->mbo_uid);
	if (body->mbo_valid & OBD_MD_FLGID)
		inode->i_gid = make_kgid(&init_user_ns, body->mbo_gid);
	if (body->mbo_valid & OBD_MD_FLFLAGS)
		inode->i_flags = ll_ext_to_inode_flags(body->mbo_flags);
	if (body->mbo_valid & OBD_MD_FLNLINK)
		set_nlink(inode, body->mbo_nlink);
	if (body->mbo_valid & OBD_MD_FLRDEV)
		inode->i_rdev = old_decode_dev(body->mbo_rdev);

	if (body->mbo_valid & OBD_MD_FLID) {
		/* FID shouldn't be changed! */
		if (fid_is_sane(&lli->lli_fid)) {
			LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
				 "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
				 PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
				 PFID(ll_inode2fid(inode)), inode);
		} else {
			lli->lli_fid = body->mbo_fid1;
		}
	}

	LASSERT(fid_seq(&lli->lli_fid) != 0);

	if (body->mbo_valid & OBD_MD_FLSIZE) {
		i_size_write(inode, body->mbo_size);

		CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
		       PFID(ll_inode2fid(inode)),
		       (unsigned long long)body->mbo_size);

		if (body->mbo_valid & OBD_MD_FLBLOCKS)
			inode->i_blocks = body->mbo_blocks;
	}

	if (body->mbo_valid & OBD_MD_TSTATE) {
		if (body->mbo_t_state & MS_RESTORE)
			lli->lli_flags |= LLIF_FILE_RESTORING;
	}

	return 0;
}

int ll_read_inode2(struct inode *inode, void *opaque)
d7e09d03
PT
1805{
1806 struct lustre_md *md = opaque;
1807 struct ll_inode_info *lli = ll_i2info(inode);
c3397e7e 1808 int rc;
d7e09d03
PT
1809
1810 CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
1811 PFID(&lli->lli_fid), inode);
1812
d7e09d03
PT
1813 /* Core attributes from the MDS first. This is a new inode, and
1814 * the VFS doesn't zero times in the core inode so we have to do
1815 * it ourselves. They will be overwritten by either MDS or OST
c0894c6c
OD
1816 * attributes - we just need to make sure they aren't newer.
1817 */
d7e09d03
PT
1818 LTIME_S(inode->i_mtime) = 0;
1819 LTIME_S(inode->i_atime) = 0;
1820 LTIME_S(inode->i_ctime) = 0;
1821 inode->i_rdev = 0;
c3397e7e 1822 rc = ll_update_inode(inode, md);
1823 if (rc)
1824 return rc;
d7e09d03
PT
1825
1826 /* OIDEBUG(inode); */
1827
d7e09d03
PT
1828 if (S_ISREG(inode->i_mode)) {
1829 struct ll_sb_info *sbi = ll_i2sbi(inode);
cf29a7b6 1830
d7e09d03
PT
1831 inode->i_op = &ll_file_inode_operations;
1832 inode->i_fop = sbi->ll_fop;
1833 inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
d7e09d03
PT
1834 } else if (S_ISDIR(inode->i_mode)) {
1835 inode->i_op = &ll_dir_inode_operations;
1836 inode->i_fop = &ll_dir_operations;
d7e09d03
PT
1837 } else if (S_ISLNK(inode->i_mode)) {
1838 inode->i_op = &ll_fast_symlink_inode_operations;
d7e09d03
PT
1839 } else {
1840 inode->i_op = &ll_special_inode_operations;
1841
1842 init_special_inode(inode, inode->i_mode,
1843 inode->i_rdev);
d7e09d03 1844 }
c3397e7e 1845
1846 return 0;
d7e09d03
PT
1847}
1848
void ll_delete_inode(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);

	if (S_ISREG(inode->i_mode) && lli->lli_clob)
		/* discard all dirty pages before truncating them, required by
		 * osc_extent implementation at LU-1030.
		 */
		cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
				   CL_FSYNC_LOCAL, 1);

	truncate_inode_pages_final(&inode->i_data);

	LASSERTF(!inode->i_data.nrpages,
		 "inode=" DFID "(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
		 PFID(ll_inode2fid(inode)), inode, inode->i_data.nrpages);

	ll_clear_inode(inode);
	clear_inode(inode);
}

int ll_iocontrol(struct inode *inode, struct file *file,
		 unsigned int cmd, unsigned long arg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	int rc, flags = 0;

	switch (cmd) {
	case FSFILT_IOC_GETFLAGS: {
		struct mdt_body *body;
		struct md_op_data *op_data;

		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
					     0, 0, LUSTRE_OPC_ANY,
					     NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		op_data->op_valid = OBD_MD_FLFLAGS;
		rc = md_getattr(sbi->ll_md_exp, op_data, &req);
		ll_finish_md_op_data(op_data);
		if (rc) {
			CERROR("%s: failure inode "DFID": rc = %d\n",
			       sbi->ll_md_exp->exp_obd->obd_name,
			       PFID(ll_inode2fid(inode)), rc);
			return -abs(rc);
		}

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);

		flags = body->mbo_flags;

		ptlrpc_req_finished(req);

		return put_user(flags, (int __user *)arg);
	}
	case FSFILT_IOC_SETFLAGS: {
		struct md_op_data *op_data;
		struct cl_object *obj;
		struct iattr *attr;

		if (get_user(flags, (int __user *)arg))
			return -EFAULT;

		op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
					     LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data))
			return PTR_ERR(op_data);

		op_data->op_attr_flags = flags;
		op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
		rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
		ll_finish_md_op_data(op_data);
		ptlrpc_req_finished(req);
		if (rc)
			return rc;

		inode->i_flags = ll_ext_to_inode_flags(flags);

		obj = ll_i2info(inode)->lli_clob;
		if (!obj)
			return 0;

		attr = kzalloc(sizeof(*attr), GFP_NOFS);
		if (!attr)
			return -ENOMEM;

		attr->ia_valid = ATTR_ATTR_FLAG;
		rc = cl_setattr_ost(obj, attr, flags);
		kfree(attr);
		return rc;
	}
	default:
		return -ENOSYS;
	}

	return 0;
}

int ll_flush_ctx(struct inode *inode)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);

	CDEBUG(D_SEC, "flush context for user %d\n",
	       from_kuid(&init_user_ns, current_uid()));

	obd_set_info_async(NULL, sbi->ll_md_exp,
			   sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
			   0, NULL, NULL);
	obd_set_info_async(NULL, sbi->ll_dt_exp,
			   sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
			   0, NULL, NULL);
	return 0;
}

/* umount -f client means force down, don't save state */
void ll_umount_begin(struct super_block *sb)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct obd_device *obd;
	struct obd_ioctl_data *ioc_data;

	CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
	       sb->s_count, atomic_read(&sb->s_active));

	obd = class_exp2obd(sbi->ll_md_exp);
	if (!obd) {
		CERROR("Invalid MDC connection handle %#llx\n",
		       sbi->ll_md_exp->exp_handle.h_cookie);
		return;
	}
	obd->obd_force = 1;

	obd = class_exp2obd(sbi->ll_dt_exp);
	if (!obd) {
		CERROR("Invalid LOV connection handle %#llx\n",
		       sbi->ll_dt_exp->exp_handle.h_cookie);
		return;
	}
	obd->obd_force = 1;

	ioc_data = kzalloc(sizeof(*ioc_data), GFP_NOFS);
	if (ioc_data) {
		obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
			      sizeof(*ioc_data), ioc_data, NULL);

		obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
			      sizeof(*ioc_data), ioc_data, NULL);

		kfree(ioc_data);
	}

	/* Really, we'd like to wait until there are no requests outstanding,
	 * and then continue. For now, we just invalidate the requests,
	 * schedule() and sleep one second if needed, and hope.
	 */
	schedule();
}

int ll_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	char *profilenm = get_profile_name(sb);
	int err;
	__u32 read_only;

	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
		read_only = *flags & MS_RDONLY;
		err = obd_set_info_async(NULL, sbi->ll_md_exp,
					 sizeof(KEY_READ_ONLY),
					 KEY_READ_ONLY, sizeof(read_only),
					 &read_only, NULL);
		if (err) {
			LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
				      profilenm, read_only ?
				      "read-only" : "read-write", err);
			return err;
		}

		if (read_only)
			sb->s_flags |= MS_RDONLY;
		else
			sb->s_flags &= ~MS_RDONLY;

		if (sbi->ll_flags & LL_SBI_VERBOSE)
			LCONSOLE_WARN("Remounted %s %s\n", profilenm,
				      read_only ? "read-only" : "read-write");
	}
	return 0;
}

/**
 * Cleanup the open handle that is cached on the MDT side.
 *
 * In the open case, the client-side open handling thread may hit an
 * error after the MDT has granted the open. In that case the client
 * should send a close RPC to the MDT as cleanup; otherwise, the open
 * handle is leaked on the MDT until the client unmounts or is evicted.
 *
 * Furthermore, if someone unlinks the file, the open handle keeps a
 * reference on the file/object and will block subsequent threads that
 * want to locate that object via its FID.
 *
 * \param[in] sb	super block for this file-system
 * \param[in] open_req	pointer to the original open request
 */
void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
{
	struct mdt_body *body;
	struct md_op_data *op_data;
	struct ptlrpc_request *close_req = NULL;
	struct obd_export *exp = ll_s2sbi(sb)->ll_md_exp;

	body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data)
		return;

	op_data->op_fid1 = body->mbo_fid1;
	op_data->op_handle = body->mbo_handle;
	op_data->op_mod_time = get_seconds();
	md_close(exp, op_data, NULL, &close_req);
	ptlrpc_req_finished(close_req);
	ll_finish_md_op_data(op_data);
}

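/*
 * Note that on failure with an IT_OPEN intent, ll_prep_inode() calls
 * ll_open_cleanup() above so that the open handle cached on the MDT is
 * not leaked.
 */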
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
		  struct super_block *sb, struct lookup_intent *it)
{
	struct ll_sb_info *sbi = NULL;
	struct lustre_md md = { NULL };
	int rc;

	LASSERT(*inode || sb);
	sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
	rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
			      sbi->ll_md_exp, &md);
	if (rc)
		goto cleanup;

	if (*inode) {
		rc = ll_update_inode(*inode, &md);
		if (rc)
			goto out;
	} else {
		LASSERT(sb);

		/*
		 * At this point the server returns the same FID that the
		 * client generated at create time, so using ->fid1 is okay
		 * here.
		 */
		if (!fid_is_sane(&md.body->mbo_fid1)) {
			CERROR("%s: Fid is insane " DFID "\n",
			       ll_get_fsname(sb, NULL, 0),
			       PFID(&md.body->mbo_fid1));
			rc = -EINVAL;
			goto out;
		}

		*inode = ll_iget(sb, cl_fid_build_ino(&md.body->mbo_fid1,
					sbi->ll_flags & LL_SBI_32BIT_API),
				 &md);
		if (IS_ERR(*inode)) {
#ifdef CONFIG_FS_POSIX_ACL
			if (md.posix_acl) {
				posix_acl_release(md.posix_acl);
				md.posix_acl = NULL;
			}
#endif
			rc = -ENOMEM;
			CERROR("new_inode -fatal: rc %d\n", rc);
			goto out;
		}
	}

	/* Handling piggyback layout lock.
	 * Layout lock can be piggybacked by getattr and open request.
	 * The lsm can be applied to the inode only if it comes with a layout
	 * lock, otherwise the correct layout may be overwritten, for example:
	 * 1. proc1: mdt returns a lsm but does not grant layout
	 * 2. layout was changed by another client
	 * 3. proc2: refresh layout and layout lock granted
	 * 4. proc1: would apply the now-stale layout from step 1
	 */
	if (it && it->it_lock_mode != 0) {
		struct lustre_handle lockh;
		struct ldlm_lock *lock;

		lockh.cookie = it->it_lock_handle;
		lock = ldlm_handle2lock(&lockh);
		LASSERT(lock);
		if (ldlm_has_layout(lock)) {
			struct cl_object_conf conf;

			memset(&conf, 0, sizeof(conf));
			conf.coc_opc = OBJECT_CONF_SET;
			conf.coc_inode = *inode;
			conf.coc_lock = lock;
			conf.u.coc_layout = md.layout;
			(void)ll_layout_conf(*inode, &conf);
		}
		LDLM_LOCK_PUT(lock);
	}

out:
	md_free_lustre_md(sbi->ll_md_exp, &md);
cleanup:
	if (rc != 0 && it && it->it_op & IT_OPEN)
		ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);

	return rc;
}

int ll_obd_statfs(struct inode *inode, void __user *arg)
{
	struct ll_sb_info *sbi = NULL;
	struct obd_export *exp;
	char *buf = NULL;
	struct obd_ioctl_data *data = NULL;
	__u32 type;
	int len = 0, rc;

	if (!inode) {
		rc = -EINVAL;
		goto out_statfs;
	}

	sbi = ll_i2sbi(inode);
	if (!sbi) {
		rc = -EINVAL;
		goto out_statfs;
	}

	rc = obd_ioctl_getdata(&buf, &len, arg);
	if (rc)
		goto out_statfs;

	data = (void *)buf;
	if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
	    !data->ioc_pbuf1 || !data->ioc_pbuf2) {
		rc = -EINVAL;
		goto out_statfs;
	}

	if (data->ioc_inllen1 != sizeof(__u32) ||
	    data->ioc_inllen2 != sizeof(__u32) ||
	    data->ioc_plen1 != sizeof(struct obd_statfs) ||
	    data->ioc_plen2 != sizeof(struct obd_uuid)) {
		rc = -EINVAL;
		goto out_statfs;
	}

	memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
	if (type & LL_STATFS_LMV) {
		exp = sbi->ll_md_exp;
	} else if (type & LL_STATFS_LOV) {
		exp = sbi->ll_dt_exp;
	} else {
		rc = -ENODEV;
		goto out_statfs;
	}

	rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
	if (rc)
		goto out_statfs;
out_statfs:
	if (buf)
		obd_ioctl_freedata(buf, len);
	return rc;
}

int ll_process_config(struct lustre_cfg *lcfg)
{
	char *ptr;
	void *sb;
	struct lprocfs_static_vars lvars;
	unsigned long x;
	int rc = 0;

	lprocfs_llite_init_vars(&lvars);

	/* The instance name contains the sb: lustre-client-aacfe000 */
	ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
	if (!ptr || !*(++ptr))
		return -EINVAL;
	rc = kstrtoul(ptr, 16, &x);
	if (rc != 0)
		return -EINVAL;
	sb = (void *)x;
	/* This better be a real Lustre superblock! */
	LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);

	/* Note we have not called client_common_fill_super yet, so
	 * proc fns must be able to handle that!
	 */
	rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
				      lcfg, sb);
	if (rc > 0)
		rc = 0;
	return rc;
}

/* This function prepares the md_op_data hint for passing it down to the
 * MD stack.
 */
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
				      struct inode *i1, struct inode *i2,
				      const char *name, size_t namelen,
				      u32 mode, __u32 opc, void *data)
{
	if (!name) {
		/* Do not reuse namelen for something else. */
		if (namelen)
			return ERR_PTR(-EINVAL);
	} else {
		if (namelen > ll_i2sbi(i1)->ll_namelen)
			return ERR_PTR(-ENAMETOOLONG);

		if (!lu_name_is_valid_2(name, namelen))
			return ERR_PTR(-EINVAL);
	}

	if (!op_data)
		op_data = kzalloc(sizeof(*op_data), GFP_NOFS);

	if (!op_data)
		return ERR_PTR(-ENOMEM);

	ll_i2gids(op_data->op_suppgids, i1, i2);
	op_data->op_fid1 = *ll_inode2fid(i1);
	op_data->op_default_stripe_offset = -1;
	if (S_ISDIR(i1->i_mode)) {
		op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
		if (opc == LUSTRE_OPC_MKDIR)
			op_data->op_default_stripe_offset =
				ll_i2info(i1)->lli_def_stripe_offset;
	}

	if (i2) {
		op_data->op_fid2 = *ll_inode2fid(i2);
		if (S_ISDIR(i2->i_mode))
			op_data->op_mea2 = ll_i2info(i2)->lli_lsm_md;
	} else {
		fid_zero(&op_data->op_fid2);
	}

	/* Reset op_cli_flags before OR-ing in the hash/API flags below;
	 * a caller-supplied op_data may carry stale flags, and zeroing
	 * after the OR would discard CLI_HASH64/CLI_API32.
	 */
	op_data->op_cli_flags = 0;

	if (ll_i2sbi(i1)->ll_flags & LL_SBI_64BIT_HASH)
		op_data->op_cli_flags |= CLI_HASH64;

	if (ll_need_32bit_api(ll_i2sbi(i1)))
		op_data->op_cli_flags |= CLI_API32;

	op_data->op_name = name;
	op_data->op_namelen = namelen;
	op_data->op_mode = mode;
	op_data->op_mod_time = ktime_get_real_seconds();
	op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
	op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
	op_data->op_cap = cfs_curproc_cap_pack();
	op_data->op_bias = 0;
	if ((opc == LUSTRE_OPC_CREATE) && name &&
	    filename_is_volatile(name, namelen, &op_data->op_mds))
		op_data->op_bias |= MDS_CREATE_VOLATILE;
	else
		op_data->op_mds = 0;
	op_data->op_data = data;

	/* When called by ll_setattr_raw, file is i1. */
	if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
		op_data->op_bias |= MDS_DATA_MODIFIED;

	return op_data;
}

void ll_finish_md_op_data(struct md_op_data *op_data)
{
	kfree(op_data);
}

int ll_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct ll_sb_info *sbi;

	LASSERT(seq && dentry);
	sbi = ll_s2sbi(dentry->d_sb);

	if (sbi->ll_flags & LL_SBI_NOLCK)
		seq_puts(seq, ",nolock");

	if (sbi->ll_flags & LL_SBI_FLOCK)
		seq_puts(seq, ",flock");

	if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
		seq_puts(seq, ",localflock");

	if (sbi->ll_flags & LL_SBI_USER_XATTR)
		seq_puts(seq, ",user_xattr");

	if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
		seq_puts(seq, ",lazystatfs");

	if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
		seq_puts(seq, ",user_fid2path");

	return 0;
}

/**
 * Get obd name by cmd, and copy out to user space
 */
int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_device *obd;

	if (cmd == OBD_IOC_GETDTNAME)
		obd = class_exp2obd(sbi->ll_dt_exp);
	else if (cmd == OBD_IOC_GETMDNAME)
		obd = class_exp2obd(sbi->ll_md_exp);
	else
		return -EINVAL;

	if (!obd)
		return -ENOENT;

	if (copy_to_user((void __user *)arg, obd->obd_name,
			 strlen(obd->obd_name) + 1))
		return -EFAULT;

	return 0;
}

/**
 * Get the Lustre file system name for \a sb. If \a buf is provided
 * (non-NULL), the fsname will be returned in this buffer; otherwise, a
 * static buffer will be used to store the fsname and returned to the caller.
 */
char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
{
	static char fsname_static[MTI_NAME_MAXLEN];
	struct lustre_sb_info *lsi = s2lsi(sb);
	char *ptr;
	int len;

	if (!buf) {
		/* This means the caller wants to use the static buffer
		 * and doesn't care about races. Usually this is the
		 * error reporting path.
		 */
		buf = fsname_static;
		buflen = sizeof(fsname_static);
	}

	len = strlen(lsi->lsi_lmd->lmd_profile);
	ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
	if (ptr && (strcmp(ptr, "-client") == 0))
		len -= 7;

	if (unlikely(len >= buflen))
		len = buflen - 1;
	strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
	buf[len] = '\0';

	return buf;
}
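/*
 * Example: for a mount profile such as "lustre-client" (see the instance
 * name handling in ll_process_config() above), the "-client" suffix is
 * stripped and ll_get_fsname() returns "lustre".
 */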

void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
	char *buf, *path = NULL;
	struct dentry *dentry = NULL;
	struct vvp_object *obj = cl_inode2vvp(page->mapping->host);

	/* This can be called inside a spin lock, so use GFP_ATOMIC. */
	buf = (char *)__get_free_page(GFP_ATOMIC);
	if (buf) {
		dentry = d_find_alias(page->mapping->host);
		if (dentry)
			path = dentry_path_raw(dentry, buf, PAGE_SIZE);
	}

	CDEBUG(D_WARNING,
	       "%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
	       ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
	       s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
	       PFID(&obj->vob_header.coh_lu.loh_fid),
	       (path && !IS_ERR(path)) ? path : "", ioret);

	if (dentry)
		dput(dentry);

	if (buf)
		free_page((unsigned long)buf);
}

ssize_t ll_copy_user_md(const struct lov_user_md __user *md,
			struct lov_user_md **kbuf)
{
	struct lov_user_md lum;
	ssize_t lum_size;

	if (copy_from_user(&lum, md, sizeof(lum))) {
		lum_size = -EFAULT;
		goto no_kbuf;
	}

	lum_size = ll_lov_user_md_size(&lum);
	if (lum_size < 0)
		goto no_kbuf;

	*kbuf = kzalloc(lum_size, GFP_NOFS);
	if (!*kbuf) {
		lum_size = -ENOMEM;
		goto no_kbuf;
	}

	if (copy_from_user(*kbuf, md, lum_size) != 0) {
		kfree(*kbuf);
		*kbuf = NULL;
		lum_size = -EFAULT;
	}
no_kbuf:
	return lum_size;
}

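/*
 * Typical calling pattern (sketch only; "ulum" stands for a hypothetical
 * user-space pointer taken from an ioctl argument):
 *
 *	struct lov_user_md *klum = NULL;
 *	ssize_t size = ll_copy_user_md(ulum, &klum);
 *
 *	if (size < 0)
 *		return size;
 *	...use klum, which holds at least size bytes...
 *	kfree(klum);
 */
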
/*
 * Compute llite root squash state after a change of root squash
 * configuration setting or add/remove of a lnet nid
 */
void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
{
	struct root_squash_info *squash = &sbi->ll_squash;
	lnet_process_id_t id;
	bool matched;
	int i;

	/* Update norootsquash flag */
	down_write(&squash->rsi_sem);
	if (list_empty(&squash->rsi_nosquash_nids)) {
		sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
	} else {
		/*
		 * Do not apply root squash as soon as one of our NIDs is
		 * in the nosquash_nids list
		 */
		matched = false;
		i = 0;

		while (LNetGetId(i++, &id) != -ENOENT) {
			if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
				continue;
			if (cfs_match_nid(id.nid, &squash->rsi_nosquash_nids)) {
				matched = true;
				break;
			}
		}
		if (matched)
			sbi->ll_flags |= LL_SBI_NOROOTSQUASH;
		else
			sbi->ll_flags &= ~LL_SBI_NOROOTSQUASH;
	}
	up_write(&squash->rsi_sem);
}

/**
 * Parse linkea content to extract information about a given hardlink
 *
 * \param[in] ldata - Initialized linkea data
 * \param[in] linkno - Link identifier
 * \param[out] parent_fid - The entry's parent FID
 * \param[out] ln - Entry name destination buffer
 *
 * \retval 0 on success
 * \retval Appropriate negative error code on failure
 */
static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
			    struct lu_fid *parent_fid, struct lu_name *ln)
{
	unsigned int idx;
	int rc;

	rc = linkea_init(ldata);
	if (rc < 0)
		return rc;

	if (linkno >= ldata->ld_leh->leh_reccount)
		/* beyond last link */
		return -ENODATA;

	linkea_first_entry(ldata);
	for (idx = 0; ldata->ld_lee; idx++) {
		linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, ln,
				    parent_fid);
		if (idx == linkno)
			break;

		linkea_next_entry(ldata);
	}

	if (idx < linkno)
		return -ENODATA;

	return 0;
}

/**
 * Get parent FID and name of an identified link. Operation is performed for
 * a given link number, letting the caller iterate over linkno to list one or
 * all links of an entry.
 *
 * \param[in] file - File descriptor against which to perform the operation
 * \param[in,out] arg - User-filled structure containing the linkno to operate
 *		        on and the available size. It is eventually filled
 *		        with the requested information or left untouched on
 *		        error
 *
 * \retval - 0 on success
 * \retval - Appropriate negative error code on failure
 */
int ll_getparent(struct file *file, struct getparent __user *arg)
{
	struct inode *inode = file_inode(file);
	struct linkea_data *ldata;
	struct lu_fid parent_fid;
	struct lu_buf buf = {
		.lb_buf = NULL,
		.lb_len = 0
	};
	struct lu_name ln;
	u32 name_size;
	u32 linkno;
	int rc;

	if (!capable(CFS_CAP_DAC_READ_SEARCH) &&
	    !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
		return -EPERM;

	if (get_user(name_size, &arg->gp_name_size))
		return -EFAULT;

	if (get_user(linkno, &arg->gp_linkno))
		return -EFAULT;

	if (name_size > PATH_MAX)
		return -EINVAL;

	ldata = kzalloc(sizeof(*ldata), GFP_NOFS);
	if (!ldata)
		return -ENOMEM;

	rc = linkea_data_new(ldata, &buf);
	if (rc < 0)
		goto ldata_free;

	rc = ll_xattr_list(inode, XATTR_NAME_LINK, XATTR_TRUSTED_T, buf.lb_buf,
			   buf.lb_len, OBD_MD_FLXATTR);
	if (rc < 0)
		goto lb_free;

	rc = ll_linkea_decode(ldata, linkno, &parent_fid, &ln);
	if (rc < 0)
		goto lb_free;

	if (ln.ln_namelen >= name_size) {
		rc = -EOVERFLOW;
		goto lb_free;
	}

	if (copy_to_user(&arg->gp_fid, &parent_fid, sizeof(arg->gp_fid))) {
		rc = -EFAULT;
		goto lb_free;
	}

	if (copy_to_user(&arg->gp_name, ln.ln_name, ln.ln_namelen)) {
		rc = -EFAULT;
		goto lb_free;
	}

	if (put_user('\0', arg->gp_name + ln.ln_namelen)) {
		rc = -EFAULT;
		goto lb_free;
	}

lb_free:
	lu_buf_free(&buf);
ldata_free:
	kfree(ldata);
	return rc;
}
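
/*
 * User-space sketch of driving ll_getparent() (illustrative only: it
 * assumes the LL_IOC_GETPARENT ioctl number and the struct getparent
 * layout from lustre_user.h, with a flexible gp_name[] array):
 *
 *	size_t len = sizeof(struct getparent) + NAME_MAX + 1;
 *	struct getparent *gp = calloc(1, len);
 *
 *	gp->gp_linkno = 0;			(first hard link)
 *	gp->gp_name_size = NAME_MAX + 1;	(room for gp_name)
 *	if (!ioctl(fd, LL_IOC_GETPARENT, gp))
 *		printf("parent: %s\n", gp->gp_name);
 */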