2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "xfs_trans.h"
28 #include "xfs_alloc.h"
29 #include "xfs_dmapi.h"
30 #include "xfs_mount.h"
31 #include "xfs_bmap_btree.h"
32 #include "xfs_alloc_btree.h"
33 #include "xfs_ialloc_btree.h"
34 #include "xfs_dir_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dir2_sf.h"
37 #include "xfs_dinode.h"
38 #include "xfs_inode.h"
39 #include "xfs_btree.h"
40 #include "xfs_ialloc.h"
41 #include "xfs_rtalloc.h"
42 #include "xfs_itable.h"
43 #include "xfs_error.h"
50 #include "xfs_buf_item.h"
51 #include "xfs_utils.h"
52 #include "xfs_dfrag.h"
53 #include "xfs_fsops.h"
55 #include <linux/capability.h>
56 #include <linux/dcache.h>
57 #include <linux/mount.h>
58 #include <linux/namei.h>
59 #include <linux/pagemap.h>
/*
 * NOTE(review): this region is an incomplete excerpt — the function signature
 * and several interior lines are missing; comments below describe only what
 * the visible lines show.
 */
62 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
63 * a file or fs handle.
65 * XFS_IOC_PATH_TO_FSHANDLE
66 * returns fs handle for a mount point or path within that mount point
67 * XFS_IOC_FD_TO_HANDLE
68 * returns full handle for a FD opened in user space
69 * XFS_IOC_PATH_TO_HANDLE
70 * returns full handle for a path
79 xfs_fsop_handlereq_t hreq;
/* Copy the request header from userspace; EFAULT on bad user pointer. */
83 if (copy_from_user(&hreq, arg, sizeof(hreq)))
84 return -XFS_ERROR(EFAULT);
86 memset((char *)&handle, 0, sizeof(handle));
/* Path-based variants: resolve the user path to an inode reference. */
89 case XFS_IOC_PATH_TO_FSHANDLE:
90 case XFS_IOC_PATH_TO_HANDLE: {
94 error = user_path_walk_link((const char __user *)hreq.path, &nd);
99 ASSERT(nd.dentry->d_inode);
100 inode = igrab(nd.dentry->d_inode);
/* FD-based variant: derive the inode from an already-open file. */
105 case XFS_IOC_FD_TO_HANDLE: {
108 file = fget(hreq.fd);
112 ASSERT(file->f_dentry);
113 ASSERT(file->f_dentry->d_inode);
114 inode = igrab(file->f_dentry->d_inode);
121 return -XFS_ERROR(EINVAL);
/* Reject inodes that do not belong to an XFS superblock. */
124 if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
125 /* we're not in XFS anymore, Toto */
127 return -XFS_ERROR(EINVAL);
130 switch (inode->i_mode & S_IFMT) {
137 return -XFS_ERROR(EBADF);
140 /* we need the vnode */
141 vp = LINVFS_GET_VP(inode);
143 /* now we can grab the fsid */
144 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
145 hsize = sizeof(xfs_fsid_t);
/* Full-handle requests additionally encode the inode number and
 * generation; the fsid-only case skips this and keeps hsize small. */
147 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
152 /* need to get access to the xfs_inode to read the generation */
153 bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
155 ip = XFS_BHVTOI(bhv);
/* Hold the inode shared-locked while sampling di_gen/i_ino. */
157 lock_mode = xfs_ilock_map_shared(ip);
159 /* fill in fid section of handle from inode */
160 handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
161 sizeof(handle.ha_fid.xfs_fid_len);
162 handle.ha_fid.xfs_fid_pad = 0;
163 handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
164 handle.ha_fid.xfs_fid_ino = ip->i_ino;
166 xfs_iunlock_map_shared(ip, lock_mode);
168 hsize = XFS_HSIZE(handle);
171 /* now copy our handle into the user buffer & write out the size */
172 if (copy_to_user(hreq.ohandle, &handle, hsize) ||
173 copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
175 return -XFS_ERROR(EFAULT);
/*
 * NOTE(review): incomplete excerpt — return type, some parameters and
 * interior lines are missing from this view.
 */
184 * Convert userspace handle data into vnode (and inode).
185 * We [ab]use the fact that all the fsop_handlereq ioctl calls
186 * have a data structure argument whose first component is always
187 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
188 * This allows us to optimise the copy_from_user calls and gives
189 * a handy, shared routine.
191 * If no error, caller must always VN_RELE the returned vp.
194 xfs_vget_fsop_handlereq(
196 struct inode *parinode, /* parent inode pointer */
197 xfs_fsop_handlereq_t *hreq,
199 struct inode **inode)
204 xfs_handle_t *handlep;
207 struct inode *inodep;
214 * Only allow handle opens under a directory.
216 if (!S_ISDIR(parinode->i_mode))
217 return XFS_ERROR(ENOTDIR);
219 hanp = hreq->ihandle;
220 hlen = hreq->ihandlen;
/* Sanity-check the user-supplied handle length before copying:
 * it must cover at least the fsid and at most a full xfs_handle_t. */
223 if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
224 return XFS_ERROR(EINVAL);
225 if (copy_from_user(handlep, hanp, hlen))
226 return XFS_ERROR(EFAULT);
/* Zero the tail of the handle if a short handle was provided. */
227 if (hlen < sizeof(*handlep))
228 memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
/* If a fid was included, its internal length and padding must be
 * consistent with the overall handle length. */
229 if (hlen > sizeof(handlep->ha_fsid)) {
230 if (handlep->ha_fid.xfs_fid_len !=
231 (hlen - sizeof(handlep->ha_fsid)
232 - sizeof(handlep->ha_fid.xfs_fid_len))
233 || handlep->ha_fid.xfs_fid_pad)
234 return XFS_ERROR(EINVAL);
238 * Crack the handle, obtain the inode # & generation #
240 xfid = (struct xfs_fid *)&handlep->ha_fid;
241 if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
242 ino = xfid->xfs_fid_ino;
243 igen = xfid->xfs_fid_gen;
245 return XFS_ERROR(EINVAL);
249 * Get the XFS inode, building a vnode to go with it.
251 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
255 return XFS_ERROR(EIO);
/* Stale handle: inode was freed (di_mode == 0) or reused with a new
 * generation number — report ENOENT rather than handing back the
 * wrong file. */
256 if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
257 xfs_iput_new(ip, XFS_ILOCK_SHARED);
258 return XFS_ERROR(ENOENT);
262 inodep = LINVFS_GET_IP(vpp);
263 xfs_iunlock(ip, XFS_ILOCK_SHARED);
/*
 * NOTE(review): incomplete excerpt of xfs_open_by_handle — the function
 * name/leading parameters and several interior lines are missing.
 * Visible flow: CAP_SYS_ADMIN gate, handle -> inode conversion,
 * permission checks mirroring open(2), then fd/dentry/file creation.
 */
274 struct file *parfilp,
275 struct inode *parinode)
282 struct dentry *dentry;
284 xfs_fsop_handlereq_t hreq;
/* Handle-based opens bypass normal path lookup, so require privilege. */
286 if (!capable(CAP_SYS_ADMIN))
287 return -XFS_ERROR(EPERM);
288 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
289 return -XFS_ERROR(EFAULT);
291 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
295 /* Restrict xfs_open_by_handle to directories & regular files. */
296 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
298 return -XFS_ERROR(EINVAL);
301 #if BITS_PER_LONG != 32
302 hreq.oflags |= O_LARGEFILE;
304 /* Put open permission in namei format. */
305 permflag = hreq.oflags;
306 if ((permflag+1) & O_ACCMODE)
308 if (permflag & O_TRUNC)
/* Enforce append-only / immutable / read-only constraints that the
 * normal open path would have applied. */
311 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
312 (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
314 return -XFS_ERROR(EPERM);
317 if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
319 return -XFS_ERROR(EACCES);
322 /* Can't write directories. */
323 if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
325 return -XFS_ERROR(EISDIR);
328 if ((new_fd = get_unused_fd()) < 0) {
/* Anonymous dentry: the file was reached by handle, not by name. */
333 dentry = d_alloc_anon(inode);
334 if (dentry == NULL) {
336 put_unused_fd(new_fd);
337 return -XFS_ERROR(ENOMEM);
340 /* Ensure umount returns EBUSY on umounts while this file is open. */
341 mntget(parfilp->f_vfsmnt);
343 /* Create file pointer. */
344 filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags);
346 put_unused_fd(new_fd);
347 return -XFS_ERROR(-PTR_ERR(filp));
/* Regular files opened by handle get the "invisible" file ops,
 * presumably to avoid DMAPI event generation — confirm against
 * linvfs_invis_file_operations. */
349 if (inode->i_mode & S_IFREG)
350 filp->f_op = &linvfs_invis_file_operations;
352 fd_install(new_fd, filp);
/*
 * Read the target of a symlink identified by an XFS handle rather than a
 * path. NOTE(review): incomplete excerpt — return type, locals and some
 * interior lines are missing from this view.
 */
357 xfs_readlink_by_handle(
360 struct file *parfilp,
361 struct inode *parinode)
367 xfs_fsop_handlereq_t hreq;
371 if (!capable(CAP_SYS_ADMIN))
372 return -XFS_ERROR(EPERM);
373 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
374 return -XFS_ERROR(EFAULT);
375 
376 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
380 /* Restrict this handle operation to symlinks only. */
381 if (!S_ISLNK(inode->i_mode)) {
383 return -XFS_ERROR(EINVAL);
/* Caller supplies the output buffer size via *ohandlen. */
386 if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
388 return -XFS_ERROR(EFAULT);
/* Build a single-segment userspace uio describing the output buffer. */
391 aiov.iov_base = hreq.ohandle;
393 auio.uio_iov = &aiov;
396 auio.uio_segflg = UIO_USERSPACE;
397 auio.uio_resid = olen;
399 VOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
/* Return the number of bytes actually copied (requested - residual). */
402 return (olen - auio.uio_resid);
/*
 * Set DMAPI attributes (event mask and state) on a file identified by an
 * XFS handle. NOTE(review): incomplete excerpt — return type, some
 * locals and cleanup lines are missing from this view.
 */
406 xfs_fssetdm_by_handle(
409 struct file *parfilp,
410 struct inode *parinode)
413 struct fsdmidata fsd;
414 xfs_fsop_setdm_handlereq_t dmhreq;
/* CAP_MKNOD (not CAP_SYS_ADMIN) gates this DMAPI operation. */
419 if (!capable(CAP_MKNOD))
420 return -XFS_ERROR(EPERM);
421 if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
422 return -XFS_ERROR(EFAULT);
424 error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &vp, &inode);
/* Refuse to modify immutable or append-only files. */
428 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
430 return -XFS_ERROR(EPERM);
433 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
435 return -XFS_ERROR(EFAULT);
438 bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
439 error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL);
/*
 * List extended attributes of a file identified by an XFS handle into a
 * user buffer. NOTE(review): incomplete excerpt — return type, error
 * paths and cleanup (kfree/VN_RELE) are missing from this view.
 */
448 xfs_attrlist_by_handle(
451 struct file *parfilp,
452 struct inode *parinode)
455 attrlist_cursor_kern_t *cursor;
456 xfs_fsop_attrlist_handlereq_t al_hreq;
461 if (!capable(CAP_SYS_ADMIN))
462 return -XFS_ERROR(EPERM);
463 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
464 return -XFS_ERROR(EFAULT);
/* Bound the kernel allocation by the user-supplied buffer length. */
465 if (al_hreq.buflen > XATTR_LIST_MAX)
466 return -XFS_ERROR(EINVAL);
468 error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq,
473 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
/* The request's pos field doubles as the iteration cursor. */
477 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
478 VOP_ATTR_LIST(vp, kbuf, al_hreq.buflen, al_hreq.flags,
479 cursor, NULL, error);
483 if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
/*
 * Fetch one extended attribute value into a user buffer via a bounce
 * buffer. NOTE(review): incomplete excerpt — signature, error returns
 * and kfree cleanup are missing from this view.
 */
495 xfs_attrmulti_attr_get(
/* Cap the kernel allocation at the xattr size limit. */
505 if (*len > XATTR_SIZE_MAX)
507 kbuf = kmalloc(*len, GFP_KERNEL)
511 VOP_ATTR_GET(vp, name, kbuf, len, flags, NULL, error);
515 if (copy_to_user(ubuf, kbuf, *len))
/*
 * Set one extended attribute from a user buffer. NOTE(review):
 * incomplete excerpt — signature tail, error returns and kfree cleanup
 * are missing from this view.
 */
524 xfs_attrmulti_attr_set(
527 const char __user *ubuf,
/* Reject writes on read-only, immutable or append-only inodes. */
534 if (IS_RDONLY(&vp->v_inode))
536 if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
538 if (len > XATTR_SIZE_MAX)
541 kbuf = kmalloc(len, GFP_KERNEL);
545 if (copy_from_user(kbuf, ubuf, len))
548 VOP_ATTR_SET(vp, name, kbuf, len, flags, NULL, error);
/*
 * Remove one extended attribute. NOTE(review): incomplete excerpt —
 * signature tail and error returns are missing from this view.
 */
556 xfs_attrmulti_attr_remove(
/* Same write-permission gates as the set path. */
564 if (IS_RDONLY(&vp->v_inode))
566 if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
569 VOP_ATTR_REMOVE(vp, name, flags, NULL, error);
/*
 * Perform a batch of get/set/remove xattr operations on a file
 * identified by an XFS handle; per-op status is reported back through
 * each op's am_error field. NOTE(review): incomplete excerpt — return
 * type, several error paths and the kfree/VN_RELE cleanup are missing.
 */
574 xfs_attrmulti_by_handle(
577 struct file *parfilp,
578 struct inode *parinode)
581 xfs_attr_multiop_t *ops;
582 xfs_fsop_attrmulti_handlereq_t am_hreq;
585 unsigned int i, size;
588 if (!capable(CAP_SYS_ADMIN))
589 return -XFS_ERROR(EPERM);
590 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
591 return -XFS_ERROR(EFAULT);
593 error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &vp, &inode);
/* Bound the op-array allocation; opcount of 0 or an oversized batch
 * is rejected. NOTE(review): opcount * sizeof() overflow appears to be
 * caught only by the size bound — verify against full source. */
598 size = am_hreq.opcount * sizeof(attr_multiop_t);
599 if (!size || size > 16 * PAGE_SIZE)
603 ops = kmalloc(size, GFP_KERNEL);
608 if (copy_from_user(ops, am_hreq.ops, size))
/* One reusable kernel-side name buffer for all ops. */
611 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
617 for (i = 0; i < am_hreq.opcount; i++) {
618 ops[i].am_error = strncpy_from_user(attr_name,
619 ops[i].am_attrname, MAXNAMELEN);
/* Empty or unterminated (too-long) names are invalid. */
620 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
622 if (ops[i].am_error < 0)
625 switch (ops[i].am_opcode) {
627 ops[i].am_error = xfs_attrmulti_attr_get(vp,
628 attr_name, ops[i].am_attrvalue,
629 &ops[i].am_length, ops[i].am_flags);
632 ops[i].am_error = xfs_attrmulti_attr_set(vp,
633 attr_name, ops[i].am_attrvalue,
634 ops[i].am_length, ops[i].am_flags);
637 ops[i].am_error = xfs_attrmulti_attr_remove(vp,
638 attr_name, ops[i].am_flags);
641 ops[i].am_error = EINVAL;
/* Copy the per-op results (am_error, am_length) back to userspace. */
645 if (copy_to_user(am_hreq.ops, ops, size))
646 error = XFS_ERROR(EFAULT);
657 /* Prototypes for a few of the stack-hungry cases that have
658 * their own functions. The functions are defined after their use
659 * so gcc doesn't get fancy and inline them with -O3. */
/* Fragment of a forward prototype (body defined later in the file). */
677 xfs_ioc_fsgeometry_v1(
/*
 * Main XFS ioctl dispatcher. NOTE(review): incomplete excerpt — the
 * xfs_ioctl signature, switch header, many case labels and fallthrough
 * lines are missing; comments describe only the visible dispatch arms.
 */
721 vp = LINVFS_GET_VP(inode);
723 vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
725 ip = XFS_BHVTOI(bdp);
/* Space-preallocation family: all funnel into xfs_ioc_space(). */
730 case XFS_IOC_ALLOCSP:
733 case XFS_IOC_UNRESVSP:
734 case XFS_IOC_ALLOCSP64:
735 case XFS_IOC_FREESP64:
736 case XFS_IOC_RESVSP64:
737 case XFS_IOC_UNRESVSP64:
739 * Only allow the sys admin to reserve space unless
740 * unwritten extents are enabled.
742 if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
743 !capable(CAP_SYS_ADMIN))
746 return xfs_ioc_space(bdp, vp, filp, ioflags, cmd, arg);
/* Report direct-I/O alignment constraints for the relevant target
 * (realtime vs. data device). */
748 case XFS_IOC_DIOINFO: {
750 xfs_buftarg_t *target =
751 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
752 mp->m_rtdev_targp : mp->m_ddev_targp;
754 da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
755 /* The size dio will do in one go */
756 da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
758 if (copy_to_user(arg, &da, sizeof(da)))
759 return -XFS_ERROR(EFAULT);
763 case XFS_IOC_FSBULKSTAT_SINGLE:
764 case XFS_IOC_FSBULKSTAT:
765 case XFS_IOC_FSINUMBERS:
766 return xfs_ioc_bulkstat(mp, cmd, arg);
768 case XFS_IOC_FSGEOMETRY_V1:
769 return xfs_ioc_fsgeometry_v1(mp, arg);
771 case XFS_IOC_FSGEOMETRY:
772 return xfs_ioc_fsgeometry(mp, arg);
/* Inode flag / extended-attribute family handled by xfs_ioc_xattr(). */
774 case XFS_IOC_GETVERSION:
775 case XFS_IOC_GETXFLAGS:
776 case XFS_IOC_SETXFLAGS:
777 case XFS_IOC_FSGETXATTR:
778 case XFS_IOC_FSSETXATTR:
779 case XFS_IOC_FSGETXATTRA:
780 return xfs_ioc_xattr(vp, ip, filp, cmd, arg);
782 case XFS_IOC_FSSETDM: {
783 struct fsdmidata dmi;
785 if (copy_from_user(&dmi, arg, sizeof(dmi)))
786 return -XFS_ERROR(EFAULT);
788 error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate,
/* Block-mapping queries. */
793 case XFS_IOC_GETBMAP:
794 case XFS_IOC_GETBMAPA:
795 return xfs_ioc_getbmap(bdp, filp, ioflags, cmd, arg);
797 case XFS_IOC_GETBMAPX:
798 return xfs_ioc_getbmapx(bdp, arg);
/* Handle-based operations (see the *_by_handle helpers above). */
800 case XFS_IOC_FD_TO_HANDLE:
801 case XFS_IOC_PATH_TO_HANDLE:
802 case XFS_IOC_PATH_TO_FSHANDLE:
803 return xfs_find_handle(cmd, arg);
805 case XFS_IOC_OPEN_BY_HANDLE:
806 return xfs_open_by_handle(mp, arg, filp, inode);
808 case XFS_IOC_FSSETDM_BY_HANDLE:
809 return xfs_fssetdm_by_handle(mp, arg, filp, inode);
811 case XFS_IOC_READLINK_BY_HANDLE:
812 return xfs_readlink_by_handle(mp, arg, filp, inode);
814 case XFS_IOC_ATTRLIST_BY_HANDLE:
815 return xfs_attrlist_by_handle(mp, arg, filp, inode);
817 case XFS_IOC_ATTRMULTI_BY_HANDLE:
818 return xfs_attrmulti_by_handle(mp, arg, filp, inode);
820 case XFS_IOC_SWAPEXT: {
821 error = xfs_swapext((struct xfs_swapext __user *)arg);
825 case XFS_IOC_FSCOUNTS: {
826 xfs_fsop_counts_t out;
828 error = xfs_fs_counts(mp, &out);
832 if (copy_to_user(arg, &out, sizeof(out)))
833 return -XFS_ERROR(EFAULT);
/* Reserved-block pool management (privileged). */
837 case XFS_IOC_SET_RESBLKS: {
838 xfs_fsop_resblks_t inout;
841 if (!capable(CAP_SYS_ADMIN))
844 if (copy_from_user(&inout, arg, sizeof(inout)))
845 return -XFS_ERROR(EFAULT);
847 /* input parameter is passed in resblks field of structure */
849 error = xfs_reserve_blocks(mp, &in, &inout);
853 if (copy_to_user(arg, &inout, sizeof(inout)))
854 return -XFS_ERROR(EFAULT);
858 case XFS_IOC_GET_RESBLKS: {
859 xfs_fsop_resblks_t out;
861 if (!capable(CAP_SYS_ADMIN))
864 error = xfs_reserve_blocks(mp, NULL, &out);
868 if (copy_to_user(arg, &out, sizeof(out)))
869 return -XFS_ERROR(EFAULT);
/* Online growfs operations (privileged). */
874 case XFS_IOC_FSGROWFSDATA: {
875 xfs_growfs_data_t in;
877 if (!capable(CAP_SYS_ADMIN))
880 if (copy_from_user(&in, arg, sizeof(in)))
881 return -XFS_ERROR(EFAULT);
883 error = xfs_growfs_data(mp, &in);
887 case XFS_IOC_FSGROWFSLOG: {
890 if (!capable(CAP_SYS_ADMIN))
893 if (copy_from_user(&in, arg, sizeof(in)))
894 return -XFS_ERROR(EFAULT);
896 error = xfs_growfs_log(mp, &in);
900 case XFS_IOC_FSGROWFSRT: {
903 if (!capable(CAP_SYS_ADMIN))
906 if (copy_from_user(&in, arg, sizeof(in)))
907 return -XFS_ERROR(EFAULT);
909 error = xfs_growfs_rt(mp, &in);
/* Freeze/thaw of the underlying block device (case labels for these
 * two arms are not visible in this excerpt). */
914 if (!capable(CAP_SYS_ADMIN))
917 if (inode->i_sb->s_frozen == SB_UNFROZEN)
918 freeze_bdev(inode->i_sb->s_bdev);
922 if (!capable(CAP_SYS_ADMIN))
924 if (inode->i_sb->s_frozen != SB_UNFROZEN)
925 thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
928 case XFS_IOC_GOINGDOWN: {
931 if (!capable(CAP_SYS_ADMIN))
934 if (get_user(in, (__uint32_t __user *)arg))
935 return -XFS_ERROR(EFAULT);
937 error = xfs_fs_goingdown(mp, in);
/* Debug-only error-injection hooks. */
941 case XFS_IOC_ERROR_INJECTION: {
942 xfs_error_injection_t in;
944 if (!capable(CAP_SYS_ADMIN))
947 if (copy_from_user(&in, arg, sizeof(in)))
948 return -XFS_ERROR(EFAULT);
950 error = xfs_errortag_add(in.errtag, mp);
954 case XFS_IOC_ERROR_CLEARALL:
955 if (!capable(CAP_SYS_ADMIN))
958 error = xfs_errortag_clearall(mp);
/*
 * Fragment of xfs_ioc_space: permission and flag checks before calling
 * xfs_change_file_space(). NOTE(review): signature and surrounding
 * lines are missing from this excerpt.
 */
979 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
980 return -XFS_ERROR(EPERM);
/* Space manipulation requires the fd to be open for writing. */
982 if (!(filp->f_mode & FMODE_WRITE))
983 return -XFS_ERROR(EBADF);
986 return -XFS_ERROR(EINVAL);
988 if (copy_from_user(&bf, arg, sizeof(bf)))
989 return -XFS_ERROR(EFAULT);
/* Map open-file flags onto attribute-change flags. */
991 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
992 attr_flags |= ATTR_NONBLOCK;
993 if (ioflags & IO_INVIS)
994 attr_flags |= ATTR_DMI;
996 error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos,
/*
 * Fragment of xfs_ioc_bulkstat: services FSBULKSTAT, FSBULKSTAT_SINGLE
 * and FSINUMBERS. NOTE(review): signature and several interior lines
 * are missing from this excerpt.
 */
1007 xfs_fsop_bulkreq_t bulkreq;
1008 int count; /* # of records returned */
1009 xfs_ino_t inlast; /* last inode number */
1013 /* done = 1 if there are more stats to get and if bulkstat */
1014 /* should be called again (unused here, but used in dmapi) */
1016 if (!capable(CAP_SYS_ADMIN))
1019 if (XFS_FORCED_SHUTDOWN(mp))
1020 return -XFS_ERROR(EIO);
1022 if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
1023 return -XFS_ERROR(EFAULT);
/* lastip is a user pointer holding the resume cursor. */
1025 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
1026 return -XFS_ERROR(EFAULT);
1028 if ((count = bulkreq.icount) <= 0)
1029 return -XFS_ERROR(EINVAL);
/* Dispatch on the specific ioctl: inode-number enumeration,
 * single-inode stat, or the general bulkstat walk. */
1031 if (cmd == XFS_IOC_FSINUMBERS)
1032 error = xfs_inumbers(mp, &inlast, &count,
1034 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
1035 error = xfs_bulkstat_single(mp, &inlast,
1036 bulkreq.ubuffer, &done);
1037 else { /* XFS_IOC_FSBULKSTAT */
/* count==1 with a nonzero cursor degenerates to a single stat. */
1038 if (count == 1 && inlast != 0) {
1040 error = xfs_bulkstat_single(mp, &inlast,
1041 bulkreq.ubuffer, &done);
1043 error = xfs_bulkstat(mp, &inlast, &count,
1044 (bulkstat_one_pf)xfs_bulkstat_one, NULL,
1045 sizeof(xfs_bstat_t), bulkreq.ubuffer,
1046 BULKSTAT_FG_QUICK, &done);
/* Write the updated cursor and record count back to userspace. */
1053 if (bulkreq.ocount != NULL) {
1054 if (copy_to_user(bulkreq.lastip, &inlast,
1056 return -XFS_ERROR(EFAULT);
1058 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
1059 return -XFS_ERROR(EFAULT);
/*
 * Return v1 (version 3) filesystem geometry to userspace.
 * NOTE(review): incomplete excerpt — parameters and the error-check
 * after xfs_fs_geometry are missing from this view.
 */
1066 xfs_ioc_fsgeometry_v1(
1070 xfs_fsop_geom_v1_t fsgeo;
/* The v1 struct is a prefix of xfs_fsop_geom_t, hence the cast. */
1073 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
1077 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
1078 return -XFS_ERROR(EFAULT);
/*
 * Fragment of xfs_ioc_fsgeometry: current-format (version 4) geometry
 * query. NOTE(review): signature and error check missing from view.
 */
1087 xfs_fsop_geom_t fsgeo;
1090 error = xfs_fs_geometry(mp, &fsgeo, 4);
1094 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
1095 return -XFS_ERROR(EFAULT);
/*
 * Flag values for the generic Linux FS_IOC_GETFLAGS/SETFLAGS-style
 * interface; translated to/from XFS_XFLAG_*/XFS_DIFLAG_* below.
 */
1100 * Linux extended inode flags interface.
1102 #define LINUX_XFLAG_SYNC 0x00000008 /* Synchronous updates */
1103 #define LINUX_XFLAG_IMMUTABLE 0x00000010 /* Immutable file */
1104 #define LINUX_XFLAG_APPEND 0x00000020 /* writes to file may only append */
1105 #define LINUX_XFLAG_NODUMP 0x00000040 /* do not dump file */
1106 #define LINUX_XFLAG_NOATIME 0x00000080 /* do not update atime */
/*
 * Merge LINUX_XFLAG_* bits from userspace into an existing XFS_XFLAG_*
 * mask: each flag present sets the XFS bit, each flag absent clears it.
 * NOTE(review): incomplete excerpt — the `else` lines pairing each
 * set/clear and the return statement are missing from this view.
 */
1109 xfs_merge_ioc_xflags(
1113 unsigned int xflags = start;
1115 if (flags & LINUX_XFLAG_IMMUTABLE)
1116 xflags |= XFS_XFLAG_IMMUTABLE;
1118 xflags &= ~XFS_XFLAG_IMMUTABLE;
1119 if (flags & LINUX_XFLAG_APPEND)
1120 xflags |= XFS_XFLAG_APPEND;
1122 xflags &= ~XFS_XFLAG_APPEND;
1123 if (flags & LINUX_XFLAG_SYNC)
1124 xflags |= XFS_XFLAG_SYNC;
1126 xflags &= ~XFS_XFLAG_SYNC;
1127 if (flags & LINUX_XFLAG_NOATIME)
1128 xflags |= XFS_XFLAG_NOATIME;
1130 xflags &= ~XFS_XFLAG_NOATIME;
1131 if (flags & LINUX_XFLAG_NODUMP)
1132 xflags |= XFS_XFLAG_NODUMP;
1134 xflags &= ~XFS_XFLAG_NODUMP;
/*
 * Translate on-disk XFS_DIFLAG_* inode flags into the LINUX_XFLAG_*
 * representation. NOTE(review): the function name line and the final
 * return are missing from this excerpt.
 */
1141 __uint16_t di_flags)
1143 unsigned int flags = 0;
1145 if (di_flags & XFS_DIFLAG_IMMUTABLE)
1146 flags |= LINUX_XFLAG_IMMUTABLE;
1147 if (di_flags & XFS_DIFLAG_APPEND)
1148 flags |= LINUX_XFLAG_APPEND;
1149 if (di_flags & XFS_DIFLAG_SYNC)
1150 flags |= LINUX_XFLAG_SYNC;
1151 if (di_flags & XFS_DIFLAG_NOATIME)
1152 flags |= LINUX_XFLAG_NOATIME;
1153 if (di_flags & XFS_DIFLAG_NODUMP)
1154 flags |= LINUX_XFLAG_NODUMP;
/*
 * Fragment of xfs_ioc_xattr: inode flag / project / extent-size
 * get/set operations driven through VOP_GETATTR/VOP_SETATTR.
 * NOTE(review): the function signature, switch header and several
 * break/error lines are missing from this excerpt.
 */
1173 case XFS_IOC_FSGETXATTR: {
1174 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1175 XFS_AT_NEXTENTS | XFS_AT_PROJID;
1176 VOP_GETATTR(vp, &va, 0, NULL, error);
/* Marshal the vattr results into the user-visible fsxattr layout. */
1180 fa.fsx_xflags = va.va_xflags;
1181 fa.fsx_extsize = va.va_extsize;
1182 fa.fsx_nextents = va.va_nextents;
1183 fa.fsx_projid = va.va_projid;
1185 if (copy_to_user(arg, &fa, sizeof(fa)))
1186 return -XFS_ERROR(EFAULT);
1190 case XFS_IOC_FSSETXATTR: {
1191 if (copy_from_user(&fa, arg, sizeof(fa)))
1192 return -XFS_ERROR(EFAULT);
1195 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1196 attr_flags |= ATTR_NONBLOCK;
1198 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
1199 va.va_xflags = fa.fsx_xflags;
1200 va.va_extsize = fa.fsx_extsize;
1201 va.va_projid = fa.fsx_projid;
1203 VOP_SETATTR(vp, &va, attr_flags, NULL, error);
1205 vn_revalidate(vp); /* update Linux inode flags */
/* Same as FSGETXATTR but reports attribute-fork extent count. */
1209 case XFS_IOC_FSGETXATTRA: {
1210 va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1211 XFS_AT_ANEXTENTS | XFS_AT_PROJID;
1212 VOP_GETATTR(vp, &va, 0, NULL, error);
1216 fa.fsx_xflags = va.va_xflags;
1217 fa.fsx_extsize = va.va_extsize;
1218 fa.fsx_nextents = va.va_anextents;
1219 fa.fsx_projid = va.va_projid;
1221 if (copy_to_user(arg, &fa, sizeof(fa)))
1222 return -XFS_ERROR(EFAULT);
1226 case XFS_IOC_GETXFLAGS: {
1227 flags = xfs_di2lxflags(ip->i_d.di_flags);
1228 if (copy_to_user(arg, &flags, sizeof(flags)))
1229 return -XFS_ERROR(EFAULT);
1233 case XFS_IOC_SETXFLAGS: {
1234 if (copy_from_user(&flags, arg, sizeof(flags)))
1235 return -XFS_ERROR(EFAULT);
/* Only the five LINUX_XFLAG_* bits are supported here. */
1237 if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
1238 LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
1240 return -XFS_ERROR(EOPNOTSUPP);
1243 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1244 attr_flags |= ATTR_NONBLOCK;
1246 va.va_mask = XFS_AT_XFLAGS;
1247 va.va_xflags = xfs_merge_ioc_xflags(flags,
1250 VOP_SETATTR(vp, &va, attr_flags, NULL, error);
1252 vn_revalidate(vp); /* update Linux inode flags */
1256 case XFS_IOC_GETVERSION: {
1257 flags = LINVFS_GET_IP(vp)->i_generation;
1258 if (copy_to_user(arg, &flags, sizeof(flags)))
1259 return -XFS_ERROR(EFAULT);
/*
 * Fragment of xfs_ioc_getbmap: classic getbmap(2)-style block-mapping
 * query. NOTE(review): the signature and surrounding lines are missing
 * from this excerpt.
 */
1280 if (copy_from_user(&bm, arg, sizeof(bm)))
1281 return -XFS_ERROR(EFAULT);
/* The interface requires room for at least a header + one extent. */
1283 if (bm.bmv_count < 2)
1284 return -XFS_ERROR(EINVAL);
1286 iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
1287 if (ioflags & IO_INVIS)
1288 iflags |= BMV_IF_NO_DMAPI_READ;
/* Extent records are written directly after the header in the user
 * buffer (arg + 1). */
1290 error = xfs_getbmap(bdp, &bm, (struct getbmap __user *)arg+1, iflags);
1294 if (copy_to_user(arg, &bm, sizeof(bm)))
1295 return -XFS_ERROR(EFAULT);
/*
 * Fragment of xfs_ioc_getbmapx: extended block-mapping query that
 * round-trips through the plain getbmap structure. NOTE(review): the
 * signature and trailing lines are missing from this excerpt.
 */
1304 struct getbmapx bmx;
1309 if (copy_from_user(&bmx, arg, sizeof(bmx)))
1310 return -XFS_ERROR(EFAULT);
1312 if (bmx.bmv_count < 2)
1313 return -XFS_ERROR(EINVAL);
1316 * Map input getbmapx structure to a getbmap
1317 * structure for xfs_getbmap.
1319 GETBMAP_CONVERT(bmx, bm);
/* Only user-settable iflags bits are allowed through. */
1321 iflags = bmx.bmv_iflags;
1323 if (iflags & (~BMV_IF_VALID))
1324 return -XFS_ERROR(EINVAL);
1326 iflags |= BMV_IF_EXTENDED;
1328 error = xfs_getbmap(bdp, &bm, (struct getbmapx __user *)arg+1, iflags);
/* Convert the result header back to getbmapx form for userspace. */
1332 GETBMAP_CONVERT(bm, bmx);
1334 if (copy_to_user(arg, &bmx, sizeof(bmx)))
1335 return -XFS_ERROR(EFAULT);