2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/posix_acl.h>
16 #include <linux/sort.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/crc32.h>
19 #include <linux/lm_interface.h>
20 #include <linux/security.h>
33 #include "ops_address.h"
35 #include "ops_inode.h"
/*
 * iget_test - icache match callback (used with ilookup5/iget5_locked):
 * an inode matches when its cached dinode block address equals the
 * requested one.
 * NOTE(review): this listing elides original lines (gaps in the leading
 * numbering) — the return statements of this function are not visible.
 */
41 static int iget_test(struct inode *inode, void *opaque)
43 struct gfs2_inode *ip = GFS2_I(inode);
44 struct gfs2_inum_host *inum = opaque;
46 if (ip->i_num.no_addr == inum->no_addr)
/*
 * iget_set - iget5_locked "set" callback: initialize a freshly allocated
 * VFS inode with its inode number (i_ino taken from the dinode address).
 * NOTE(review): interior lines are elided in this listing.
 */
52 static int iget_set(struct inode *inode, void *opaque)
54 struct gfs2_inode *ip = GFS2_I(inode);
55 struct gfs2_inum_host *inum = opaque;
58 inode->i_ino = inum->no_addr;
/*
 * gfs2_ilookup - find an inode in the inode cache WITHOUT allocating a
 * new one; keyed by no_formal_ino with iget_test as the match function.
 * NOTE(review): the trailing arguments/closing of the call are elided.
 */
62 struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
64 return ilookup5(sb, (unsigned long)inum->no_formal_ino,
/*
 * gfs2_iget - get (or allocate) the VFS inode for @inum via iget5_locked,
 * using iget_test/iget_set above; hash key is no_formal_ino.
 */
68 static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
70 return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
71 iget_test, iget_set, inum);
75 * gfs2_inode_lookup - Lookup an inode
76 * @sb: The super block
77 * @inum: The inode number
78 * @type: The type of the inode
80 * Returns: A VFS inode, or an error
/*
 * NOTE(review): this listing elides original lines (see the gaps in the
 * leading numbers); error labels, some branches and returns are missing.
 */
83 struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type)
85 struct inode *inode = gfs2_iget(sb, inum);
86 struct gfs2_inode *ip = GFS2_I(inode);
87 struct gfs2_glock *io_gl;
91 return ERR_PTR(-ENOBUFS);
/* One-time setup for an inode newly allocated by gfs2_iget(). */
93 if (inode->i_state & I_NEW) {
94 struct gfs2_sbd *sdp = GFS2_SB(inode);
95 umode_t mode = DT2IF(type);
96 inode->i_private = ip;
/* Choose inode/file/address-space operations from the file type. */
100 inode->i_op = &gfs2_file_iops;
101 inode->i_fop = &gfs2_file_fops;
102 inode->i_mapping->a_ops = &gfs2_file_aops;
103 } else if (S_ISDIR(mode)) {
104 inode->i_op = &gfs2_dir_iops;
105 inode->i_fop = &gfs2_dir_fops;
106 } else if (S_ISLNK(mode)) {
107 inode->i_op = &gfs2_symlink_iops;
109 inode->i_op = &gfs2_dev_iops;
/* Attach the inode glock, then a SHARED "iopen" glock held for the
   inode's lifetime; GIF_INVALID marks the dinode as not yet read in. */
112 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
115 ip->i_gl->gl_object = ip;
117 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
121 set_bit(GIF_INVALID, &ip->i_flags);
122 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
126 gfs2_glock_put(io_gl);
127 unlock_new_inode(inode);
/* Error unwind: drop the glock references taken above (labels elided). */
132 gfs2_glock_put(io_gl);
134 ip->i_gl->gl_object = NULL;
135 gfs2_glock_put(ip->i_gl);
138 return ERR_PTR(error);
/*
 * gfs2_dinode_in - translate an on-disk dinode (big-endian, @buf) into
 * the in-core gfs2_dinode_host and VFS inode fields of @ip.
 * Returns 0 on success (per visible callers); flags filesystem
 * inconsistency if the dinode's block address does not match the inode.
 * NOTE(review): interior lines (case labels, braces, returns) are elided
 * from this listing.
 */
141 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
143 struct gfs2_dinode_host *di = &ip->i_di;
144 const struct gfs2_dinode *str = buf;
/* Sanity: the dinode we read must describe this inode's block. */
146 if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) {
147 if (gfs2_consist_inode(ip))
148 gfs2_dinode_print(ip);
151 if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino))
154 ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
155 ip->i_inode.i_rdev = 0;
/* Device special files carry major/minor in the dinode. */
156 switch (ip->i_inode.i_mode & S_IFMT) {
159 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
160 be32_to_cpu(str->di_minor));
164 ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
165 ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
167 * We will need to review setting the nlink count here in the
168 * light of the forthcoming ro bind mount work. This is a reminder
171 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
172 di->di_size = be64_to_cpu(str->di_size);
173 i_size_write(&ip->i_inode, di->di_size);
174 di->di_blocks = be64_to_cpu(str->di_blocks);
175 gfs2_set_inode_blocks(&ip->i_inode);
/* On-disk timestamps are whole seconds; nsec is zeroed. */
176 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
177 ip->i_inode.i_atime.tv_nsec = 0;
178 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
179 ip->i_inode.i_mtime.tv_nsec = 0;
180 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
181 ip->i_inode.i_ctime.tv_nsec = 0;
183 di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
184 di->di_goal_data = be64_to_cpu(str->di_goal_data);
185 di->di_generation = be64_to_cpu(str->di_generation);
187 di->di_flags = be32_to_cpu(str->di_flags);
188 gfs2_set_inode_flags(&ip->i_inode);
189 di->di_height = be16_to_cpu(str->di_height);
191 di->di_depth = be16_to_cpu(str->di_depth);
192 di->di_entries = be32_to_cpu(str->di_entries);
194 di->di_eattr = be64_to_cpu(str->di_eattr);
199 * gfs2_inode_refresh - Refresh the incore copy of the dinode
200 * @ip: The GFS2 inode
/*
 * Reads the dinode buffer, verifies its metadata type (GFS2_METATYPE_DI),
 * decodes it via gfs2_dinode_in(), then clears GIF_INVALID so the in-core
 * copy is considered valid.
 * NOTE(review): error-path lines (brelse, returns) are elided here.
 */
205 int gfs2_inode_refresh(struct gfs2_inode *ip)
207 struct buffer_head *dibh;
210 error = gfs2_meta_inode_buffer(ip, &dibh);
214 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
219 error = gfs2_dinode_in(ip, dibh->b_data);
221 clear_bit(GIF_INVALID, &ip->i_flags);
/*
 * gfs2_dinode_dealloc - free the dinode block itself. By this point the
 * inode must own exactly one block (the dinode), otherwise filesystem
 * inconsistency is flagged. Sequence: hold quotas, hold the rindex, lock
 * the owning resource group exclusively, then free the dinode inside a
 * transaction, unwinding each holder in reverse.
 * NOTE(review): transaction end, labels and returns are elided here.
 */
226 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
228 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
229 struct gfs2_alloc *al;
230 struct gfs2_rgrpd *rgd;
233 if (ip->i_di.di_blocks != 1) {
234 if (gfs2_consist_inode(ip))
235 gfs2_dinode_print(ip);
239 al = gfs2_alloc_get(ip);
241 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
245 error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
/* Map the dinode's block address to its resource group. */
249 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
251 gfs2_consist_inode(ip);
253 goto out_rindex_relse;
256 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
259 goto out_rindex_relse;
261 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
265 gfs2_trans_add_gl(ip->i_gl);
267 gfs2_free_di(rgd, ip);
270 clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);
/* Unwind: rgrp glock, rindex glock, quota hold (labels elided). */
273 gfs2_glock_dq_uninit(&al->al_rgd_gh);
275 gfs2_glock_dq_uninit(&al->al_ri_gh);
277 gfs2_quota_unhold(ip);
/*
 * gfs2_change_nlink_i - helper called when an inode's link count reaches
 * zero: marks the inode unlinked on disk (gfs2_unlink_di) under the
 * resource-group glock. Takes the rindex and rgrp locks only if the
 * caller (e.g. the rename path) does not already hold them.
 * NOTE(review): several lines (error checks, returns) are elided here.
 */
283 static int gfs2_change_nlink_i(struct gfs2_inode *ip)
285 struct gfs2_sbd *sdp = ip->i_inode.i_sb->s_fs_info;
286 struct gfs2_inode *rindex = GFS2_I(sdp->sd_rindex);
287 struct gfs2_glock *ri_gl = rindex->i_gl;
288 struct gfs2_rgrpd *rgd;
289 struct gfs2_holder ri_gh, rg_gh;
292 /* if we come from rename path, we could have the lock already */
293 existing = gfs2_glock_is_locked_by_me(ri_gl);
295 error = gfs2_rindex_hold(sdp, &ri_gh);
300 /* find the matching rgd */
302 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
307 * Eventually we may want to move rgd(s) to a linked list
308 * and piggyback the free logic into one of gfs2 daemons
309 * to gain some performance.
/* Only take the rgrp glock if we don't hold it already. */
311 if (!rgd->rd_gl || !gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
312 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
316 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
317 gfs2_glock_dq_uninit(&rg_gh);
322 gfs2_glock_dq_uninit(&ri_gh);
328 * gfs2_change_nlink - Change nlink count on inode
329 * @ip: The GFS2 inode
330 * @diff: The change in the nlink count required
/*
 * Adjusts i_nlink by exactly +1 or -1 (BUG_ON enforces this), detects
 * underflow when decrementing, writes the dinode back within the current
 * transaction, and — when the count reaches zero — marks the inode
 * unlinked via gfs2_change_nlink_i().
 * NOTE(review): interior lines (brelse, returns, braces) are elided.
 */
334 int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
336 struct buffer_head *dibh;
340 BUG_ON(diff != 1 && diff != -1);
341 nlink = ip->i_inode.i_nlink + diff;
343 /* If we are reducing the nlink count, but the new value ends up being
344 bigger than the old one, we must have underflowed. */
345 if (diff < 0 && nlink > ip->i_inode.i_nlink) {
346 if (gfs2_consist_inode(ip))
347 gfs2_dinode_print(ip);
351 error = gfs2_meta_inode_buffer(ip, &dibh);
356 inc_nlink(&ip->i_inode);
358 drop_nlink(&ip->i_inode);
360 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
362 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
363 gfs2_dinode_out(ip, dibh->b_data);
365 mark_inode_dirty(&ip->i_inode);
/* Link count hit zero: mark the dinode unlinked on disk. */
367 if (ip->i_inode.i_nlink == 0)
368 error = gfs2_change_nlink_i(ip);
/*
 * gfs2_lookup_simple - wrapper around gfs2_lookupi() for internal
 * (is_root=1) lookups by plain C-string name; converts the NULL
 * "not found" result into ERR_PTR(-ENOENT) for its callers.
 * NOTE(review): declarations and the success return are elided here.
 */
373 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
377 gfs2_str2qstr(&qstr, name);
378 inode = gfs2_lookupi(dip, &qstr, 1, NULL);
379 /* gfs2_lookupi has inconsistent callers: vfs
380 * related routines expect NULL for no entry found,
381 * gfs2_lookup_simple callers expect ENOENT
382 * and do not check for NULL.
385 return ERR_PTR(-ENOENT);
392 * gfs2_lookupi - Look up a filename in a directory and return its inode
393 * @d_gh: An initialized holder for the directory glock
394 * @name: The name of the inode to look for
395 * @is_root: If 1, ignore the caller's permissions
396 * @i_gh: An uninitialized holder for the new inode glock
398 * This can be called via the VFS filldir function when NFS is doing
399 * a readdirplus and the inode which its intending to stat isn't
400 * already in cache. In this case we must not take the directory glock
401 * again, since the readdir call will have already taken that lock.
/*
 * NOTE(review): this listing elides lines (gaps in the numbering); some
 * branches, the "." / ".." shortcut body, and error checks are missing.
 */
406 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
407 int is_root, struct nameidata *nd)
409 struct super_block *sb = dir->i_sb;
410 struct gfs2_inode *dip = GFS2_I(dir);
411 struct gfs2_holder d_gh;
412 struct gfs2_inum_host inum;
415 struct inode *inode = NULL;
418 if (!name->len || name->len > GFS2_FNAMESIZE)
419 return ERR_PTR(-ENAMETOOLONG);
/* "." anywhere, and ".." at the fs root, short-circuit the search. */
421 if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
422 (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
423 dir == sb->s_root->d_inode)) {
/* Take the directory glock only if the caller (e.g. NFS readdirplus,
   per the header comment above) doesn't hold it already. */
428 if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
429 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
431 return ERR_PTR(error);
/* is_root lookups skip this permission check (lines elided). */
436 error = permission(dir, MAY_EXEC, NULL);
441 error = gfs2_dir_search(dir, name, &inum, &type);
445 inode = gfs2_inode_lookup(sb, &inum, type);
449 gfs2_glock_dq_uninit(&d_gh);
/* VFS convention: -ENOENT maps to a NULL inode, not an ERR_PTR. */
450 if (error == -ENOENT)
452 return inode ? inode : ERR_PTR(error);
/*
 * pick_formal_ino_1 - fast path for allocating a formal inode number:
 * takes the next value from the per-node inum range stored after the
 * dinode header of sd_ir_inode, under sd_inum_mutex and a transaction.
 * NOTE(review): the range-empty branch and returns are elided here.
 */
455 static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
457 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
458 struct buffer_head *bh;
459 struct gfs2_inum_range_host ir;
462 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
465 mutex_lock(&sdp->sd_inum_mutex);
467 error = gfs2_meta_inode_buffer(ip, &bh);
469 mutex_unlock(&sdp->sd_inum_mutex);
/* Range lives immediately after the dinode header in the buffer. */
474 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
477 *formal_ino = ir.ir_start++;
479 gfs2_trans_add_bh(ip->i_gl, bh, 1);
480 gfs2_inum_range_out(&ir,
481 bh->b_data + sizeof(struct gfs2_dinode));
483 mutex_unlock(&sdp->sd_inum_mutex);
490 mutex_unlock(&sdp->sd_inum_mutex);
/*
 * pick_formal_ino_2 - slow path: under the EXCLUSIVE glock of the
 * cluster-wide inum inode, refill the per-node range (by
 * GFS2_INUM_QUANTUM) from the global counter when it is exhausted, then
 * hand out the next formal inode number.
 * NOTE(review): several lines (branch bodies, brelse, returns) are
 * elided from this listing.
 */
496 static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
498 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
499 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
500 struct gfs2_holder gh;
501 struct buffer_head *bh;
502 struct gfs2_inum_range_host ir;
505 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
509 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
512 mutex_lock(&sdp->sd_inum_mutex);
514 error = gfs2_meta_inode_buffer(ip, &bh);
518 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
/* Local range empty: pull a fresh quantum from the global counter. */
521 struct buffer_head *m_bh;
525 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
/* Global counter is a raw __be64 right after the dinode header. */
529 z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
530 x = y = be64_to_cpu(z);
532 ir.ir_length = GFS2_INUM_QUANTUM;
533 x += GFS2_INUM_QUANTUM;
535 gfs2_consist_inode(m_ip);
537 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
538 *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
543 *formal_ino = ir.ir_start++;
546 gfs2_trans_add_bh(ip->i_gl, bh, 1);
547 gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));
552 mutex_unlock(&sdp->sd_inum_mutex);
555 gfs2_glock_dq_uninit(&gh);
/*
 * pick_formal_ino - try the lock-light fast path first, falling back to
 * the cluster-locked slow path (the branching between the two calls is
 * elided in this listing).
 */
559 static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
563 error = pick_formal_ino_1(sdp, inum);
567 error = pick_formal_ino_2(sdp, inum);
573 * create_ok - OK to create a new on-disk inode here?
574 * @dip: Directory in which dinode is to be created
575 * @name: Name of new dinode
/*
 * Checks write/exec permission on the directory, rejects creation in an
 * unlinked directory, probes for an existing entry of the same name, and
 * guards against directory-entry / link-count overflow ((u32)-1).
 * NOTE(review): the error returns between checks are elided here.
 */
581 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
586 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
590 /* Don't create entries in an unlinked directory */
591 if (!dip->i_inode.i_nlink)
594 error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
605 if (dip->i_di.di_entries == (u32)-1)
607 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
/*
 * munge_mode_uid_gid - choose the new inode's mode/uid/gid from the
 * creating process and the parent directory: honours the "suiddir" mount
 * option for setuid directories, and inherits the gid when the parent
 * has S_ISGID set; otherwise uses current->fsuid / current->fsgid.
 * NOTE(review): some branch bodies/braces are elided in this listing.
 */
613 static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
614 unsigned int *uid, unsigned int *gid)
616 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
617 (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
620 else if (dip->i_inode.i_uid != current->fsuid)
622 *uid = dip->i_inode.i_uid;
624 *uid = current->fsuid;
626 if (dip->i_inode.i_mode & S_ISGID) {
629 *gid = dip->i_inode.i_gid;
631 *gid = current->fsgid;
/*
 * alloc_dinode - reserve and allocate the on-disk block for a new dinode
 * (gfs2_alloc_di fills in no_addr and the generation) inside a small
 * RES_RG_BIT + RES_STATFS transaction.
 * NOTE(review): transaction end, error labels and returns are elided.
 */
634 static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum,
637 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
642 dip->i_alloc.al_requested = RES_DINODE;
643 error = gfs2_inplace_reserve(dip);
647 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
651 inum->no_addr = gfs2_alloc_di(dip, generation);
656 gfs2_inplace_release(dip);
663 * init_dinode - Fill in a new dinode structure
664 * @dip: the directory this inode is being created in
665 * @gl: The glock covering the new inode
666 * @inum: the inode number
667 * @mode: the file permissions
/*
 * NOTE(review): this listing elides lines (gaps in the numbering);
 * some field assignments, braces and the brelse are missing.
 */
673 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
674 const struct gfs2_inum_host *inum, unsigned int mode,
675 unsigned int uid, unsigned int gid,
676 const u64 *generation, dev_t dev)
678 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
679 struct gfs2_dinode *di;
680 struct buffer_head *dibh;
/* Fresh metadata buffer for the new dinode block, added to the
   transaction, typed as a dinode, and cleared past the header. */
682 dibh = gfs2_meta_new(gl, inum->no_addr);
683 gfs2_trans_add_bh(gl, dibh, 1);
684 gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
685 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
686 di = (struct gfs2_dinode *)dibh->b_data;
/* All on-disk fields are big-endian. */
688 di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
689 di->di_num.no_addr = cpu_to_be64(inum->no_addr);
690 di->di_mode = cpu_to_be32(mode);
691 di->di_uid = cpu_to_be32(uid);
692 di->di_gid = cpu_to_be32(gid);
695 di->di_blocks = cpu_to_be64(1);
696 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
697 di->di_major = cpu_to_be32(MAJOR(dev));
698 di->di_minor = cpu_to_be32(MINOR(dev));
699 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
700 di->di_generation = cpu_to_be64(*generation);
/* Inherit jdata/directio flags from the parent or the tunables;
   directories propagate the INHERIT_* flags themselves. */
704 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
705 gfs2_tune_get(sdp, gt_new_files_jdata))
706 di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
707 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
708 gfs2_tune_get(sdp, gt_new_files_directio))
709 di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
710 } else if (S_ISDIR(mode)) {
711 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
712 GFS2_DIF_INHERIT_DIRECTIO);
713 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
714 GFS2_DIF_INHERIT_JDATA);
718 di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
724 memset(&di->__pad4, 0, sizeof(di->__pad4));
726 memset(&di->di_reserved, 0, sizeof(di->di_reserved));
/*
 * make_dinode - write the new dinode to disk: pick ownership via
 * munge_mode_uid_gid(), lock and check quota for that uid/gid, then
 * init_dinode() and a +1 quota change inside a transaction.
 * NOTE(review): transaction end, error labels and returns are elided.
 */
731 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
732 unsigned int mode, const struct gfs2_inum_host *inum,
733 const u64 *generation, dev_t dev)
735 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
736 unsigned int uid, gid;
739 munge_mode_uid_gid(dip, &mode, &uid, &gid);
742 error = gfs2_quota_lock(dip, uid, gid);
746 error = gfs2_quota_check(dip, uid, gid);
750 error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
754 init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
755 gfs2_quota_change(dip, +1, uid, gid);
759 gfs2_quota_unlock(dip);
/*
 * link_dinode - add the directory entry for a freshly created inode and
 * set its initial link count to 1. Reserves extra blocks (and a larger
 * transaction) only when gfs2_diradd_alloc_required() says the directory
 * must grow; otherwise a small RES_LEAF transaction suffices.
 * NOTE(review): several lines (labels, brelse, returns) are elided.
 */
765 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
766 struct gfs2_inode *ip)
768 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
769 struct gfs2_alloc *al;
771 struct buffer_head *dibh;
774 al = gfs2_alloc_get(dip);
776 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
/* <0 means error; >0 means the directory needs new blocks. */
780 error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
781 if (alloc_required < 0)
783 if (alloc_required) {
784 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
786 goto fail_quota_locks;
788 al->al_requested = sdp->sd_max_dirres;
790 error = gfs2_inplace_reserve(dip);
792 goto fail_quota_locks;
794 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
795 al->al_rgd->rd_ri.ri_length +
797 RES_STATFS + RES_QUOTA, 0);
/* No growth needed: much smaller transaction. */
801 error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
803 goto fail_quota_locks;
806 error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode));
810 error = gfs2_meta_inode_buffer(ip, &dibh);
813 ip->i_inode.i_nlink = 1;
814 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
815 gfs2_dinode_out(ip, dibh->b_data);
/* Unwind path (labels elided in this listing). */
823 if (dip->i_alloc.al_rgd)
824 gfs2_inplace_release(dip);
827 gfs2_quota_unlock(dip);
/*
 * gfs2_security_init - ask the LSM for an initial security xattr for the
 * new inode and store it as a GFS2_EATYPE_SECURITY extended attribute.
 * -EOPNOTSUPP from the LSM is treated specially (branch body elided —
 * presumably "no security module: succeed"; TODO confirm).
 * NOTE(review): declarations, kfree of name/value and returns are elided.
 */
834 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
840 struct gfs2_ea_request er;
842 err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
843 &name, &value, &len);
846 if (err == -EOPNOTSUPP)
851 memset(&er, 0, sizeof(struct gfs2_ea_request));
853 er.er_type = GFS2_EATYPE_SECURITY;
856 er.er_name_len = strlen(name);
857 er.er_data_len = len;
859 err = gfs2_ea_set_i(ip, &er);
868 * gfs2_createi - Create a new inode
869 * @ghs: An array of two holders
870 * @name: The name of the new file
871 * @mode: the permissions on the new inode
873 * @ghs[0] is an initialized holder for the directory
874 * @ghs[1] is the holder for the inode lock
876 * If the return value is not NULL, the glocks on both the directory and the new
877 * file are held. A transaction has been started and an inplace reservation
/*
 * NOTE(review): this listing elides lines (gaps in the numbering);
 * error checks between steps and most failure labels are missing.
 */
883 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
884 unsigned int mode, dev_t dev)
887 struct gfs2_inode *dip = ghs->gh_gl->gl_object;
888 struct inode *dir = &dip->i_inode;
889 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
890 struct gfs2_inum_host inum;
894 if (!name->len || name->len > GFS2_FNAMESIZE)
895 return ERR_PTR(-ENAMETOOLONG);
/* Upgrade the directory holder to EXCLUSIVE for the create. */
897 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
898 error = gfs2_glock_nq(ghs);
902 error = create_ok(dip, name, mode);
/* Pick a formal ino, allocate the dinode block, then lock the new
   inode's glock number EXCLUSIVE before writing the dinode. */
906 error = pick_formal_ino(sdp, &inum.no_formal_ino);
910 error = alloc_dinode(dip, &inum, &generation);
914 error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
915 LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
919 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
/* Bring the new inode in-core, then ACL / security / directory link. */
923 inode = gfs2_inode_lookup(dir->i_sb, &inum, IF2DT(mode));
927 error = gfs2_inode_refresh(GFS2_I(inode));
931 error = gfs2_acl_create(dip, GFS2_I(inode));
935 error = gfs2_security_init(dip, GFS2_I(inode));
939 error = link_dinode(dip, name, GFS2_I(inode));
944 return ERR_PTR(-ENOMEM);
/* Failure unwind (labels elided in this listing). */
950 gfs2_glock_dq_uninit(ghs + 1);
954 return ERR_PTR(error);
958 * gfs2_rmdiri - Remove a directory
959 * @dip: The parent directory of the directory to be removed
960 * @name: The name of the directory to be removed
961 * @ip: The GFS2 inode of the directory to be removed
963 * Assumes Glocks on dip and ip are held
/*
 * Requires the directory to contain exactly "." and ".." (di_entries ==
 * 2); deletes the parent's entry, drops the parent's nlink (for the
 * victim's ".."), removes "." and "..", then drops the victim's nlink
 * twice ("." plus the parent entry).
 * NOTE(review): error returns between steps are elided in this listing.
 */
968 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
969 struct gfs2_inode *ip)
974 if (ip->i_di.di_entries != 2) {
975 if (gfs2_consist_inode(ip))
976 gfs2_dinode_print(ip);
980 error = gfs2_dir_del(dip, name);
984 error = gfs2_change_nlink(dip, -1);
988 gfs2_str2qstr(&dotname, ".");
989 error = gfs2_dir_del(ip, &dotname);
993 gfs2_str2qstr(&dotname, "..");
994 error = gfs2_dir_del(ip, &dotname);
998 /* It looks odd, but it really should be done twice */
999 error = gfs2_change_nlink(ip, -1);
1003 error = gfs2_change_nlink(ip, -1);
1011 * gfs2_unlink_ok - check to see that a inode is still in a directory
1012 * @dip: the directory
1013 * @name: the name of the file
1016 * Assumes that the lock on (at least) @dip is held.
1018 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
/*
 * Checks immutability/append-only flags, sticky-bit ownership rules
 * (CAP_FOWNER override), directory write permission, and finally that
 * @name in @dip still resolves to @ip with a matching file type.
 * NOTE(review): the error-return lines between checks are elided here.
 */
1021 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1022 struct gfs2_inode *ip)
1024 struct gfs2_inum_host inum;
1028 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
/* Sticky directory: only the dir owner, file owner, or CAP_FOWNER. */
1031 if ((dip->i_inode.i_mode & S_ISVTX) &&
1032 dip->i_inode.i_uid != current->fsuid &&
1033 ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
1036 if (IS_APPEND(&dip->i_inode))
1039 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
1043 error = gfs2_dir_search(&dip->i_inode, name, &inum, &type);
1047 if (!gfs2_inum_equal(&inum, &ip->i_num))
/* Type mismatch between directory entry and inode: inconsistency. */
1050 if (IF2DT(ip->i_inode.i_mode) != type) {
1051 gfs2_consist_inode(dip);
1059 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
1063 * Follow @to back to the root and make sure we don't encounter @this
1064 * Assumes we already hold the rename lock.
/*
 * Walks ".." links from @to toward the root; moving is refused if @this
 * is encountered on the way (would create a cycle). The loop construct
 * and the walk's continuation are elided in this listing.
 */
1069 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1071 struct inode *dir = &to->i_inode;
1072 struct super_block *sb = dir->i_sb;
1077 gfs2_str2qstr(&dotdot, "..");
1082 if (dir == &this->i_inode) {
1086 if (dir == sb->s_root->d_inode) {
1091 tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
1093 error = PTR_ERR(tmp);
1107 * gfs2_readlinki - return the contents of a symlink
1108 * @ip: the symlink's inode
1109 * @buf: a pointer to the buffer to be filled
1110 * @len: a pointer to the length of @buf
1112 * If @buf is too small, a piece of memory is kmalloc()ed and needs
1113 * to be freed by the caller.
/*
 * Under a shared (atime-updating) glock, copies the symlink target out
 * of the dinode block into *buf. Per the header comment above, a larger
 * buffer is kmalloc()ed when needed and must be freed by the caller.
 * NOTE(review): the size-vs-*len comparison, brelse and some error
 * paths are elided in this listing.
 */
1118 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1120 struct gfs2_holder i_gh;
1121 struct buffer_head *dibh;
1125 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
1126 error = gfs2_glock_nq_atime(&i_gh);
1128 gfs2_holder_uninit(&i_gh);
/* Zero-length symlink is a filesystem inconsistency. */
1132 if (!ip->i_di.di_size) {
1133 gfs2_consist_inode(ip);
1138 error = gfs2_meta_inode_buffer(ip, &dibh);
/* +1 for the terminating NUL. */
1142 x = ip->i_di.di_size + 1;
1144 *buf = kmalloc(x, GFP_KERNEL);
/* Target is stored inline, right after the dinode header. */
1151 memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1157 gfs2_glock_dq_uninit(&i_gh);
1162 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
1163 * conditionally update the inode's atime
1164 * @gh: the holder to acquire
1166 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
1167 * Update if the difference between the current time and the inode's current
1168 * atime is greater than an interval specified at mount.
/*
 * NOTE(review): this listing elides lines (gaps in the numbering);
 * unlock/dequeue calls, brelse and several returns are missing.
 */
1173 int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1175 struct gfs2_glock *gl = gh->gh_gl;
1176 struct gfs2_sbd *sdp = gl->gl_sbd;
1177 struct gfs2_inode *ip = gl->gl_object;
1178 s64 curtime, quantum = gfs2_tune_get(sdp, gt_atime_quantum);
/* Holder must carry GL_ATIME, must not be async, and must be on an
   inode glock. */
1183 if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
1184 gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
1185 gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
/* Remember the caller's requested state so it can be restored later. */
1188 state = gh->gh_state;
1189 flags = gh->gh_flags;
1191 error = gfs2_glock_nq(gh);
/* No atime updates on noatime or read-only mounts. */
1195 if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
1196 (sdp->sd_vfs->s_flags & MS_RDONLY))
1199 curtime = get_seconds();
/* atime stale beyond the mount's quantum: retake EXCLUSIVE to update. */
1200 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
1202 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1204 error = gfs2_glock_nq(gh);
1208 /* Verify that atime hasn't been updated while we were
1209 trying to get exclusive lock. */
1211 curtime = get_seconds();
1212 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
1213 struct buffer_head *dibh;
1214 struct gfs2_dinode *di;
1216 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1217 if (error == -EROFS)
1222 error = gfs2_meta_inode_buffer(ip, &dibh);
1224 goto fail_end_trans;
1226 ip->i_inode.i_atime.tv_sec = curtime;
1228 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1229 di = (struct gfs2_dinode *)dibh->b_data;
1230 di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1233 gfs2_trans_end(sdp);
1236 /* If someone else has asked for the glock,
1237 unlock and let them have it. Then reacquire
1238 in the original state. */
1239 if (gfs2_glock_is_blocking(gl)) {
1241 gfs2_holder_reinit(state, flags, gh);
1242 return gfs2_glock_nq(gh);
/* Failure path: close the transaction (label elided). */
1249 gfs2_trans_end(sdp);
/*
 * __gfs2_setattr_simple - apply @attr to the in-core inode and write the
 * dinode back; assumes the caller already has an open transaction (see
 * gfs2_setattr_simple below). inode_setattr is asserted not to fail.
 * NOTE(review): brelse and the return are elided in this listing.
 */
1256 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1258 struct buffer_head *dibh;
1261 error = gfs2_meta_inode_buffer(ip, &dibh);
1263 error = inode_setattr(&ip->i_inode, attr);
1264 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1265 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1266 gfs2_dinode_out(ip, dibh->b_data);
1273 * gfs2_setattr_simple -
1277 * Called with a reference on the vnode.
1282 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1286 if (current->journal_info)
1287 return __gfs2_setattr_simple(ip, attr);
1289 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
1293 error = __gfs2_setattr_simple(ip, attr);
1294 gfs2_trans_end(GFS2_SB(&ip->i_inode));