/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
23 #include "xfs_trans.h"
27 #include "xfs_alloc.h"
28 #include "xfs_quota.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_btree.h"
36 #include "xfs_btree_trace.h"
37 #include "xfs_ialloc.h"
39 #include "xfs_rtalloc.h"
40 #include "xfs_error.h"
41 #include "xfs_itable.h"
42 #include "xfs_fsops.h"
44 #include "xfs_buf_item.h"
45 #include "xfs_utils.h"
46 #include "xfs_vnodeops.h"
47 #include "xfs_log_priv.h"
48 #include "xfs_trans_priv.h"
49 #include "xfs_filestream.h"
50 #include "xfs_da_btree.h"
51 #include "xfs_extfree_item.h"
52 #include "xfs_mru_cache.h"
53 #include "xfs_inode_item.h"
55 #include "xfs_trace.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>

static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
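
/*
 * The zone/pool pairing above is deliberate: backing the ioend zone with
 * a mempool guarantees that I/O completion structures can still be
 * allocated under memory pressure, so writeback can always make forward
 * progress.
 */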

#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
#define MNTOPT_LOGDEV	"logdev"	/* log device */
#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
#define MNTOPT_BSDGROUPS    "bsdgroups"    /* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS   "sysvgroups"   /* group-ID from current process */
#define MNTOPT_ALLOCSIZE    "allocsize"    /* preferred allocation size */
#define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
#define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
					 * unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
#define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
#define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
					 * in stat() */
#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
#define MNTOPT_DELAYLOG   "delaylog"	/* Delayed logging enabled */
#define MNTOPT_NODELAYLOG "nodelaylog"	/* Delayed logging disabled */
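
/*
 * A hypothetical invocation combining several of the options above:
 *
 *	mount -t xfs -o logbufs=8,logbsize=256k,noikeep /dev/sdb1 /mnt
 */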

/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
enum {
	Opt_barrier, Opt_nobarrier, Opt_err
};

static const match_table_t tokens = {
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_err, NULL}
};

STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
	int	last, shift_left_factor = 0;
	char	*value = s;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
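
/*
 * Example: "32k" parses as 32 << 10 == 32768; the suffix character is
 * stripped in place before simple_strtoul() converts the leading digits.
 */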

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	struct super_block	*sb = mp->m_super;
	char			*this_char, *value, *eov;
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	__uint8_t		iosizelog = 0;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
	mp->m_flags |= XFS_MOUNT_DELAYLOG;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;

		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbufs = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_logname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
			cmn_err(CE_WARN,
				"XFS: %s option not allowed on this system",
				this_char);
			return EINVAL;
		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_rtname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = simple_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = suffix_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
			mp->m_flags |= XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
			mp->m_flags &= ~XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
			mp->m_flags |= XFS_MOUNT_WSYNC;
		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
			mp->m_flags |= XFS_MOUNT_NOALIGN;
		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
			mp->m_flags |= XFS_MOUNT_SWALLOC;
		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			dsunit = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
			if (!value || !*value) {
				cmn_err(CE_WARN,
					"XFS: %s option requires an argument",
					this_char);
				return EINVAL;
			}
			dswidth = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
			cmn_err(CE_WARN,
				"XFS: %s option not allowed on this system",
				this_char);
			return EINVAL;
#endif
		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
			mp->m_flags |= XFS_MOUNT_NOUUID;
		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
			mp->m_flags |= XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
			mp->m_flags |= XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
			mp->m_flags |= XFS_MOUNT_ATTR2;
		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					  XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					  XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					  XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
			mp->m_flags |= XFS_MOUNT_DELAYLOG;
		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
		} else if (!strcmp(this_char, "ihashsize")) {
			cmn_err(CE_WARN,
	"XFS: ihashsize no longer used, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisdsync")) {
			cmn_err(CE_WARN,
	"XFS: osyncisdsync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisosync")) {
			cmn_err(CE_WARN,
	"XFS: osyncisosync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "irixsgid")) {
			cmn_err(CE_WARN,
	"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
		} else {
			cmn_err(CE_WARN,
				"XFS: unknown mount option [%s].", this_char);
			return EINVAL;
		}
	}

	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only.");
		return EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		cmn_err(CE_WARN,
	"XFS: sunit and swidth options incompatible with the noalign option");
		return EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		cmn_err(CE_WARN,
			"XFS: quota support not available in this kernel.");
		return EINVAL;
	}
#endif

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
		cmn_err(CE_WARN,
			"XFS: cannot mount with both project and group quota");
		return EINVAL;
	}

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		cmn_err(CE_WARN,
			"XFS: sunit and swidth must be specified together");
		return EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		cmn_err(CE_WARN,
	"XFS: stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return EINVAL;
	}

done:
	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		if (dsunit) {
			mp->m_dalign = dsunit;
			mp->m_flags |= XFS_MOUNT_RETERR;
		}

		if (dswidth)
			mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		cmn_err(CE_WARN,
			"XFS: invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		cmn_err(CE_WARN,
	"XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return XFS_ERROR(EINVAL);
	}

	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			cmn_err(CE_WARN,
		"XFS: invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return XFS_ERROR(EINVAL);
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
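
/*
 * Example (hypothetical option string): parsing
 * "logbufs=8,sunit=64,swidth=256" leaves mp->m_logbufs == 8 and stashes
 * dsunit/dswidth; those stripe values are only converted to FSBs later
 * in the mount path, once the superblock and hence the block size are
 * known.
 */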

struct proc_xfs_info {
	int	flag;
	char	*str;
};

STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
	if (mp->m_rtname)
		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, "," MNTOPT_SUNIT "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, "," MNTOPT_USRQUOTA);
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, "," MNTOPT_UQUOTANOENF);

	/* Either project or group quotas can be active, not both */

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_PRJQUOTA);
		else
			seq_puts(m, "," MNTOPT_PQUOTANOENF);
	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
		else
			seq_puts(m, "," MNTOPT_GQUOTANOENF);
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, "," MNTOPT_NOQUOTA);

	return 0;
}

STATIC __uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
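
/*
 * Worked example: on a 32-bit kernel with CONFIG_LBDAF and 4K pages the
 * limit is (4096 << 32) - 1, i.e. 16TB - 1; without CONFIG_LBDAF and
 * 4K blocks it is (4096 << 31) - 1, the ~8Tb wrap described above.
 */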

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_buf_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}

void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, underlying device is readonly");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}
}

STATIC void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
}
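
/*
 * blkdev_issue_flush() sends an empty flush request to the device so
 * that its volatile write cache is drained before callers rely on
 * previously written blocks being stable.
 */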

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp, mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			cmn_err(CE_WARN,
	"XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
							mp->m_fsname);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
							mp->m_fsname);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev)
		xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
 out:
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    mp->m_sb.sb_blocksize,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_blocksize,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

/*
 * XFS AIL push thread support
 */
void
xfsaild_wakeup(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	/* only ever move the target forwards */
	if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
		ailp->xa_target = threshold_lsn;
		wake_up_process(ailp->xa_task);
	}
}

STATIC int
xfsaild(
	void	*data)
{
	struct xfs_ail	*ailp = data;
	xfs_lsn_t	last_pushed_lsn = 0;
	long		tout = 0; /* milliseconds */

	while (!kthread_should_stop()) {
		/*
		 * for short sleeps indicating congestion, don't allow us to
		 * get woken early. Otherwise all we do is bang on the AIL lock
		 * without making progress.
		 */
		if (tout && tout <= 20)
			__set_current_state(TASK_KILLABLE);
		else
			__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(tout ?
				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);

		/* swsusp */
		try_to_freeze();

		ASSERT(ailp->xa_mount->m_log);
		if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
			continue;

		tout = xfsaild_push(ailp, &last_pushed_lsn);
	}

	return 0;
}	/* xfsaild */

int
xfsaild_start(
	struct xfs_ail		*ailp)
{
	ailp->xa_target = 0;
	ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				    ailp->xa_mount->m_fsname);
	if (IS_ERR(ailp->xa_task))
		return -PTR_ERR(ailp->xa_task);
	return 0;
}

void
xfsaild_stop(
	struct xfs_ail		*ailp)
{
	kthread_stop(ailp->xa_task);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	XFS_STATS_INC(vn_reclaim);

	/* bad inode, get out here ASAP */
	if (is_bad_inode(inode))
		goto out_reclaim;

	xfs_ioend_wait(ip);

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock. The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
out_reclaim:
	xfs_inode_set_reclaim_tag(ip);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_iocount, 0);
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);
	init_waitqueue_head(&ip->i_ipin_wait);
	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&ip->i_flush);
	complete(&ip->i_flush);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
 * we catch unlogged VFS level updates to the inode.
 *
 * We need the barrier() to maintain correct ordering between unlogged
 * updates and the transaction commit code that clears the i_update_core
 * field. This requires all updates to be completed before marking the
 * inode dirty.
 */
STATIC void
xfs_fs_dirty_inode(
	struct inode	*inode)
{
	barrier();
	XFS_I(inode)->i_update_core = 1;
}

STATIC int
xfs_log_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);

	if (error) {
		xfs_trans_cancel(tp, 0);
		/* we need to return with the lock held shared */
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	/*
	 * Note - it's possible that we might have pushed ourselves out of the
	 * way during trans_reserve which would flush the inode.  But there's
	 * no guarantee that the inode buffer has actually gone out yet (it's
	 * delwri).  Plus the buffer could be pinned anyway if it's part of
	 * an inode in another recent transaction.  So we play it safe and
	 * fire off the transaction anyway.
	 */
	xfs_trans_ijoin(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);

	return error;
}

STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	trace_xfs_write_inode(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		/*
		 * Make sure the inode has made it into the log.  Instead
		 * of forcing it all the way to stable storage using a
		 * synchronous transaction we let the log force inside the
		 * ->sync_fs call do that for us, which reduces the number
		 * of synchronous log forces dramatically.
		 */
		xfs_ioend_wait(ip);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		if (ip->i_update_core) {
			error = xfs_log_inode(ip);
			if (error)
				goto out_unlock;
		}
	} else {
		/*
		 * We make this non-blocking if the inode is contended, return
		 * EAGAIN to indicate to the caller that they did not succeed.
		 * This prevents the flush path from blocking on inodes inside
		 * another operation right now, they get caught later by
		 * xfs_sync.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
			goto out;

		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
			goto out_unlock;

		/*
		 * Now we have the flush lock and the inode is not pinned, we
		 * can check if the inode is really clean as we know that
		 * there are no pending transaction completions, it is not
		 * waiting on the delayed write queue and there is no IO in
		 * progress.
		 */
		if (xfs_inode_clean(ip)) {
			xfs_ifunlock(ip);
			error = 0;
			goto out_unlock;
		}
		error = xfs_iflush(ip, 0);
	}

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 out:
	/*
	 * if we failed to write out the inode then mark
	 * it dirty again so we'll try again later.
	 */
	if (error)
		xfs_mark_inode_dirty_sync(ip);
	return -error;
}

STATIC void
xfs_fs_evict_inode(
	struct inode		*inode)
{
	xfs_inode_t		*ip = XFS_I(inode);

	trace_xfs_evict_inode(ip);

	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_DEC(vn_active);

	/*
	 * The iolock is used by the file system to coordinate reads,
	 * writes, and block truncates.  Up to this point the lock
	 * protected concurrent accesses by users of the inode.  But
	 * from here forward we're doing some final processing of the
	 * inode because we're done with it, and although we reuse the
	 * iolock for protection it is really a distinct lock class
	 * (in the lockdep sense) from before.  To keep lockdep happy
	 * (and basically indicate what we are doing), we explicitly
	 * re-init the iolock here.
	 */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");

	xfs_inactive(ip);
}

STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
}

STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Unregister the memory shrinker before we tear down the mount
	 * structure so we don't have memory reclaim racing with us here.
	 */
	xfs_inode_shrinker_unregister(mp);
	xfs_syncd_stop(mp);

	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_unmountfs(mp);
	xfs_freesb(mp);
	xfs_icsb_destroy_counters(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	/*
	 * Not much we can do for the first async pass.  Writing out the
	 * superblock would be counter-productive as we are going to redirty
	 * when writing out other data and metadata (and writing out a single
	 * block is quite fast anyway).
	 *
	 * Try to asynchronously kick off quota syncing at least.
	 */
	if (!wait) {
		xfs_qm_sync(mp, SYNC_TRYLOCK);
		return 0;
	}

	error = xfs_quiesce_data(mp);
	if (error)
		return -error;

	if (laptop_mode) {
		int	prev_sync_seq = mp->m_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(mp->m_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(mp->m_wait_single_sync_task,
				mp->m_sync_seq != prev_sync_seq);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	__uint64_t		fakeinos, id;
	xfs_extlen_t		lsize;
	__int64_t		ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	statp->f_bfree = statp->f_bavail =
				sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files =
	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
	statp->f_ffree = max_t(__int64_t, ffree, 0);

	spin_unlock(&mp->m_sb_lock);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			mp->m_flags |= XFS_MOUNT_BARRIER;

			/*
			 * Test if barriers are actually working if we can,
			 * else delay this check until the filesystem is
			 * marked writeable.
			 */
			if (!(mp->m_flags & XFS_MOUNT_RDONLY))
				xfs_mountfs_check_barriers(mp);
			break;
		case Opt_nobarrier:
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			printk(KERN_INFO
	"XFS: mount option \"%s\" not supported for remount\n", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		mp->m_flags &= ~XFS_MOUNT_RDONLY;
		if (mp->m_flags & XFS_MOUNT_BARRIER)
			xfs_mountfs_check_barriers(mp);

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_flags) {
			error = xfs_mount_log_sb(mp, mp->m_update_flags);
			if (error) {
				cmn_err(CE_WARN,
					"XFS: failed to write sb changes");
				return error;
			}
			mp->m_update_flags = 0;
		}

		/*
		 * Fill out the reserve pool if it is empty. Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/*
		 * After we have synced the data but before we sync the
		 * metadata, we need to free up the reserve block pool so that
		 * the used block count in the superblock on disk is correct at
		 * the end of the remount. Stash the current reserve pool size
		 * so that if we get remounted rw, we can return it to the same
		 * size.
		 */

		xfs_quiesce_data(mp);
		xfs_save_resvblks(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return -xfs_fs_log_dummy(mp);
}
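
/*
 * Note on conventions: the core XFS code returns positive errnos while
 * the VFS expects negative ones, hence the negations in xfs_fs_freeze()
 * above and xfs_fs_show_options() below.
 */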

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	return 0;
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			cmn_err(CE_WARN,
	"XFS: logbuf size must be greater than or equal to log stripe size");
			return XFS_ERROR(EINVAL);
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			cmn_err(CE_WARN,
	"XFS: logbuf size for version 1 logs must be 16K or 32K");
			return XFS_ERROR(EINVAL);
		}
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		cmn_err(CE_WARN,
	"XFS: cannot mount a read-only filesystem as read-write");
		return XFS_ERROR(EROFS);
	}

	return 0;
}

STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_LIST_HEAD(&mp->m_sync_list);
	spin_lock_init(&mp->m_sync_lock);
	init_waitqueue_head(&mp->m_wait_single_sync_task);

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_icsb_init_counters(mp);
	if (error)
		goto out_close_devices;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_destroy_counters;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_mountfs_check_barriers(mp);

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = ENOENT;
		goto fail_unmount;
	}
	if (is_bad_inode(root)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}

	error = xfs_syncd_init(mp);
	if (error)
		goto fail_vnrele;

	xfs_inode_shrinker_register(mp);

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_destroy_counters:
	xfs_icsb_destroy_counters(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return -error;

 fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		iput(root);
	}

 fail_unmount:
	/*
	 * Blow away any referenced inode in the filestreams cache.
	 * This can and will cause log traffic as inodes go inactive
	 * here.
	 */
	xfs_filestream_unmount(mp);

	XFS_bflush(mp->m_ddev_targp);

	xfs_unmountfs(mp);
	goto out_free_sb;
}

STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.write_inode		= xfs_fs_write_inode,
	.evict_inode		= xfs_fs_evict_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};

STATIC int __init
xfs_init_zones(void)
{

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
	if (!xfs_dabuf_zone)
		goto out_destroy_da_state_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_dabuf_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
				  NBWORD) * sizeof(int))), "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
			xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	return 0;

 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_dabuf_zone:
	kmem_zone_destroy(xfs_dabuf_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_ioend_init();
	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_zones;

	error = xfs_filestream_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_buf_init();
	if (error)
		goto out_filestream_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_sysctl_unregister;
	return 0;

 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_filestream_uninit:
	xfs_filestream_uninit();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_filestream_uninit();
	xfs_mru_cache_uninit();
	xfs_destroy_zones();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");