4 * Copyright (C) International Business Machines Corp., 2002,2007
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
40 #define DECLARE_GLOBALS_HERE
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
46 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
/* Forward declaration: the quotactl ops table itself is defined further down
   in this file (only referenced from cifs_read_super). */
48 #ifdef CONFIG_CIFS_QUOTA
49 static struct quotactl_ops cifs_quotactl_ops;
52 #ifdef CONFIG_CIFS_EXPERIMENTAL
53 extern struct export_operations cifs_export_ops;
54 #endif /* EXPERIMENTAL */
/* Global behavior switches.  These are plain globals (not module params);
   they are toggled elsewhere, e.g. via /proc/fs/cifs — confirm against the
   debug/proc code, which is outside this view. */
59 unsigned int oplockEnabled = 1;
60 unsigned int experimEnabled = 0;
61 unsigned int linuxExtEnabled = 1;
62 unsigned int lookupCacheEnabled = 1;
63 unsigned int multiuser_mount = 0;
64 unsigned int extended_security = CIFSSEC_DEF;
65 /* unsigned int ntlmv2_support = 0; */
66 unsigned int sign_CIFS_PDUs = 1;
/* Kernel-thread handles for the two background daemons started in
   init_cifs(); the extern declarations exist only to silence sparse. */
67 extern struct task_struct * oplockThread; /* remove sparse warning */
68 struct task_struct * oplockThread = NULL;
69 /* extern struct task_struct * dnotifyThread; remove sparse warning */
70 static struct task_struct * dnotifyThread = NULL;
71 static const struct super_operations cifs_super_ops;
/* Tunable module parameters; out-of-range values are clamped at init time
   (see cifs_init_request_bufs and init_cifs below). */
72 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
73 module_param(CIFSMaxBufSize, int, 0);
74 MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
75 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
76 module_param(cifs_min_rcv, int, 0);
77 MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
78 unsigned int cifs_min_small = 30;
79 module_param(cifs_min_small, int, 0);
80 MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
81 unsigned int cifs_max_pending = CIFS_MAX_REQ;
82 module_param(cifs_max_pending, int, 0);
83 MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");
/* Mempools are defined near the kmem caches below; externs kept here for
   readability of the parameter block. */
85 extern mempool_t *cifs_sm_req_poolp;
86 extern mempool_t *cifs_req_poolp;
87 extern mempool_t *cifs_mid_poolp;
89 extern struct kmem_cache *cifs_oplock_cachep;
/* Fill a freshly allocated superblock: allocate the per-sb cifs_sb_info,
 * perform the actual mount (network connect + tree connect) via cifs_mount,
 * then set up sb ops, blocksize, and the root dentry.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): this is a fragment — several lines (braces, the NULL check
 * after kzalloc, rc handling) are elided from this view; confirm against
 * the full file before relying on the error paths described here. */
92 cifs_read_super(struct super_block *sb, void *data,
93 const char *devname, int silent)
96 struct cifs_sb_info *cifs_sb;
99 /* BB should we make this contingent on mount parm? */
100 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
/* Per-superblock private info; zeroed so flags/pointers start clean. */
101 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
102 cifs_sb = CIFS_SB(sb);
/* Establish TCP session, SMB session and tree connection. */
106 rc = cifs_mount(sb, cifs_sb, data, devname);
111 ("cifs_mount failed w/return code = %d", rc));
112 goto out_mount_failed;
115 sb->s_magic = CIFS_MAGIC_NUMBER;
116 sb->s_op = &cifs_super_ops;
117 #ifdef CONFIG_CIFS_EXPERIMENTAL
118 if (experimEnabled != 0)
119 sb->s_export_op = &cifs_export_ops;
120 #endif /* EXPERIMENTAL */
121 /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
122 sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
123 #ifdef CONFIG_CIFS_QUOTA
124 sb->s_qcop = &cifs_quotactl_ops;
125 #endif
126 sb->s_blocksize = CIFS_MAX_MSGSIZE;
127 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
/* Instantiate the root inode (fixed inode number ROOT_I) and hang the
   root dentry off it; failure paths below tear down the mount. */
128 inode = iget(sb, ROOT_I);
135 sb->s_root = d_alloc_root(inode);
145 cERROR(1, ("cifs_read_super: get root inode failed"));
/* out_mount_failed cleanup: release the nls table grabbed by cifs_mount. */
151 if (cifs_sb->local_nls)
152 unload_nls(cifs_sb->local_nls);
/* Superblock teardown (->put_super): disconnect from the server via
 * cifs_umount and release the charset table.  Tolerates a NULL cifs_sb
 * (can happen if cifs_read_super failed early).  Fragment — braces and
 * return statements are elided from this view. */
159 cifs_put_super(struct super_block *sb)
162 struct cifs_sb_info *cifs_sb;
164 cFYI(1, ("In cifs_put_super"));
165 cifs_sb = CIFS_SB(sb);
166 if (cifs_sb == NULL) {
167 cFYI(1,("Empty cifs superblock info passed to unmount"));
170 rc = cifs_umount(sb, cifs_sb);
/* An error here is logged but cannot be propagated — put_super is void. */
172 cERROR(1, ("cifs_umount failed with return code %d", rc));
174 unload_nls(cifs_sb->local_nls);
/* ->statfs: query free/total space from the server, trying progressively
 * older SMB info levels: POSIX QFS info (if the server advertises Unix
 * extensions), then NT-level QFSInfo, then the legacy level-one call for
 * old servers.  Always returns 0 to the VFS even if all queries fail
 * (see the question in the comment on the return below). */
180 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
182 struct super_block *sb = dentry->d_sb;
184 int rc = -EOPNOTSUPP;
185 struct cifs_sb_info *cifs_sb;
186 struct cifsTconInfo *pTcon;
190 cifs_sb = CIFS_SB(sb);
191 pTcon = cifs_sb->tcon;
193 buf->f_type = CIFS_MAGIC_NUMBER;
195 /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
196 buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
197 presumably be total path, but note
198 that some servers (including Samba 3)
199 have a shorter maximum path */
200 buf->f_files = 0; /* undefined */
201 buf->f_ffree = 0; /* unlimited */
203 /* BB we could add a second check for a QFS Unix capability bit */
204 /* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
/* Preferred path: POSIX QFS info when the tcon advertises Unix caps. */
205 if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
206 le64_to_cpu(pTcon->fsUnixInfo.Capability)))
207 rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);
209 /* Only need to call the old QFSInfo if failed
/* Fallback 1: NT-level QFSInfo (not supported by OS/2 servers). */
212 if (pTcon->ses->capabilities & CAP_NT_SMBS)
213 rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */
215 /* Some old Windows servers also do not support level 103, retry with
216 older level one if old server failed the previous call or we
217 bypassed it because we detected that this was an older LANMAN sess */
/* Fallback 2: legacy level-one QFSInfo for very old servers. */
219 rc = SMBOldQFSInfo(xid, pTcon, buf);
224 /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
226 return 0; /* always return success? what if volume is no
/* ->permission: if the mount was made with "noperm", skip client-side
 * permission checks entirely (the server's ACLs still apply); otherwise
 * fall through to the generic mode-bit check.  Fragment — the body of the
 * noperm branch (presumably "return 0") is elided from this view. */
230 static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
232 struct cifs_sb_info *cifs_sb;
234 cifs_sb = CIFS_SB(inode->i_sb);
236 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
238 } else /* file mode might have been restricted at mount time
239 on the client (above and beyond ACL on servers) for
240 servers which do not support setting and viewing mode bits,
241 so allowing client to check permissions is useful */
242 return generic_permission(inode, mask, NULL);
/* Slab caches and mempools created in init_cifs() via the cifs_init_*
 * helpers below.  cifs_oplock_cachep is non-static because transport code
 * (misc.c/transport.c) allocates oplock queue entries from it. */
245 static struct kmem_cache *cifs_inode_cachep;
246 static struct kmem_cache *cifs_req_cachep;
247 static struct kmem_cache *cifs_mid_cachep;
248 struct kmem_cache *cifs_oplock_cachep;
249 static struct kmem_cache *cifs_sm_req_cachep;
250 mempool_t *cifs_sm_req_poolp;
251 mempool_t *cifs_req_poolp;
252 mempool_t *cifs_mid_poolp;
/* ->alloc_inode: allocate a cifsInodeInfo from our slab and initialize the
 * CIFS-specific fields; the VFS inode is embedded (vfs_inode) and returned.
 * NOTE(review): the NULL check on the kmem_cache_alloc result is elided
 * from this fragment — confirm it exists in the full file. */
254 static struct inode *
255 cifs_alloc_inode(struct super_block *sb)
257 struct cifsInodeInfo *cifs_inode;
258 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
261 cifs_inode->cifsAttrs = 0x20; /* default */
262 atomic_set(&cifs_inode->inUse, 0);
/* time == 0 marks the inode's cached metadata as needing revalidation. */
263 cifs_inode->time = 0;
264 /* Until the file is open and we have gotten oplock
265 info back from the server, can not assume caching of
266 file data or metadata */
267 cifs_inode->clientCanCacheRead = FALSE;
268 cifs_inode->clientCanCacheAll = FALSE;
269 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
271 /* Can not set i_flags here - they get immediately overwritten
272 to zero by the VFS */
273 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
274 INIT_LIST_HEAD(&cifs_inode->openFileList)
275 return &cifs_inode->vfs_inode;
/* ->destroy_inode: return the containing cifsInodeInfo to the slab. */
279 cifs_destroy_inode(struct inode *inode)
281 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
285 * cifs_show_options() is for displaying mount options in /proc/mounts.
286 * Not all settable options are displayed but most of the important
/* Emits ",key=value" pairs via seq_printf; uid/gid are shown only when
 * they are client overrides (or the server lacks Unix extensions, in which
 * case the client-side values are authoritative). */
290 cifs_show_options(struct seq_file *s, struct vfsmount *m)
292 struct cifs_sb_info *cifs_sb;
294 cifs_sb = CIFS_SB(m->mnt_sb);
298 /* BB add prepath to mount options displayed */
299 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
300 if (cifs_sb->tcon->ses) {
301 if (cifs_sb->tcon->ses->userName)
302 seq_printf(s, ",username=%s",
303 cifs_sb->tcon->ses->userName);
304 if (cifs_sb->tcon->ses->domainName)
305 seq_printf(s, ",domain=%s",
306 cifs_sb->tcon->ses->domainName);
309 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
310 seq_printf(s, ",posixpaths");
311 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
312 !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
313 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
314 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
315 !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
316 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
317 seq_printf(s, ",rsize=%d",cifs_sb->rsize);
318 seq_printf(s, ",wsize=%d",cifs_sb->wsize);
323 #ifdef CONFIG_CIFS_QUOTA
/* xfs-style quotactl ->set_xquota handler.  Fragment: the body that would
 * actually send the request to the server is elided from this view — at
 * this point it only traces the request. */
324 int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
325 struct fs_disk_quota * pdquota)
329 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
330 struct cifsTconInfo *pTcon;
333 pTcon = cifs_sb->tcon;
340 cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
/* xfs-style quotactl ->get_xquota handler.  Fragment: the body that would
 * query the server is elided from this view — at this point it only traces
 * the request.
 * Fix: the trace message previously said "set type" — a copy-paste from
 * cifs_xquota_set above, which made get requests indistinguishable from
 * set requests in the debug log. */
349 int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
350 struct fs_disk_quota * pdquota)
354 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
355 struct cifsTconInfo *pTcon;
358 pTcon = cifs_sb->tcon;
364 cFYI(1,("get type: 0x%x id: %d",quota_type,qid));
/* xfs-style quotactl ->set_xstate handler (enable/disable quota state).
 * Fragment: only the trace of the requested operation is visible here. */
373 int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
377 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
378 struct cifsTconInfo *pTcon;
381 pTcon = cifs_sb->tcon;
387 cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
/* xfs-style quotactl ->get_xstate handler (query quota state).
 * Fragment: only the trace of the output buffer pointer is visible here. */
396 int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
400 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
401 struct cifsTconInfo *pTcon;
404 pTcon = cifs_sb->tcon;
410 cFYI(1,("pqstats %p",qstats));
/* Quota operations wired into sb->s_qcop by cifs_read_super.
 * Fix: .get_xquota previously pointed at cifs_xquota_set (copy-paste bug),
 * so a quota *query* would have invoked the *setter*. */
419 static struct quotactl_ops cifs_quotactl_ops = {
420 .set_xquota = cifs_xquota_set,
421 .get_xquota = cifs_xquota_get,
422 .set_xstate = cifs_xstate_set,
423 .get_xstate = cifs_xstate_get,
/* ->umount_begin: on a forced unmount (MNT_FORCE), mark the tree
 * connection as exiting when we hold the last reference and wake all
 * waiters on the server's request/response queues so blocked callers can
 * error out instead of hanging.  Fragment — several lines (NULL checks,
 * up(&tcon->tconSem), braces) are elided from this view. */
427 static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
429 struct cifs_sb_info *cifs_sb;
430 struct cifsTconInfo * tcon;
432 if (!(flags & MNT_FORCE))
434 cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
438 tcon = cifs_sb->tcon;
440 down(&tcon->tconSem);
/* Only mark exiting if nothing else still uses this tcon. */
442 if (atomic_read(&tcon->useCount) == 1)
443 tcon->tidStatus = CifsExiting;
446 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
447 /* cancel_notify_requests(tcon); */
448 if (tcon->ses && tcon->ses->server)
450 cFYI(1,("wake up tasks now - umount begin not complete"));
451 wake_up_all(&tcon->ses->server->request_q);
452 wake_up_all(&tcon->ses->server->response_q);
453 msleep(1); /* yield */
454 /* we have to kick the requests once more */
455 wake_up_all(&tcon->ses->server->response_q);
458 /* BB FIXME - finish add checks for tidStatus BB */
463 #ifdef CONFIG_CIFS_STATS2
/* ->show_stats (CONFIG_CIFS_STATS2 only): per-mount statistics for
 * /proc/mounts consumers; body elided from this fragment. */
464 static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
/* ->remount_fs: only effect visible here is to keep directories from
 * updating atime; no option re-parsing is done. */
471 static int cifs_remount(struct super_block *sb, int *flags, char *data)
473 *flags |= MS_NODIRATIME;
/* Superblock operations table installed by cifs_read_super. */
477 static const struct super_operations cifs_super_ops = {
478 .read_inode = cifs_read_inode,
479 .put_super = cifs_put_super,
480 .statfs = cifs_statfs,
481 .alloc_inode = cifs_alloc_inode,
482 .destroy_inode = cifs_destroy_inode,
483 /* .drop_inode = generic_delete_inode,
484 .delete_inode = cifs_delete_inode, *//* Do not need the above two functions
485 unless later we add lazy close of inodes or unless the kernel forgets to call
486 us with the same number of releases (closes) as opens */
487 .show_options = cifs_show_options,
488 .umount_begin = cifs_umount_begin,
489 .remount_fs = cifs_remount,
490 #ifdef CONFIG_CIFS_STATS2
491 .show_stats = cifs_show_stats,
/* ->get_sb: allocate an anonymous superblock, fill it via cifs_read_super,
 * and attach it to the vfsmount.  On read_super failure the sb is torn
 * down with deactivate_super.  Fragment — the IS_ERR(sb) check and rc
 * handling lines are elided from this view. */
496 cifs_get_sb(struct file_system_type *fs_type,
497 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
500 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
502 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
509 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
/* Failure path: sget returned with s_umount held; release and destroy. */
511 up_write(&sb->s_umount);
512 deactivate_super(sb);
515 sb->s_flags |= MS_ACTIVE;
516 return simple_set_mnt(mnt, sb);
/* ->aio_write wrapper: delegate to the generic path, then — unless we hold
 * an exclusive (write-caching) oplock — push dirty pages to the server
 * immediately, since other clients may read the file. */
519 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
520 unsigned long nr_segs, loff_t pos)
522 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
525 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
526 if (!CIFS_I(inode)->clientCanCacheAll)
527 filemap_fdatawrite(inode->i_mapping);
/* ->llseek: for SEEK_END we must first refresh the cached file size from
 * the server (applications poll file length via seek-to-end), so force
 * revalidation by zeroing the cached-attribute timestamp, then delegate
 * to remote_llseek. */
531 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
533 /* origin == SEEK_END => we must revalidate the cached file length */
534 if (origin == SEEK_END) {
537 /* some applications poll for the file length in this strange
538 way so we must seek to end on non-oplocked files by
539 setting the revalidate time to zero */
540 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
542 retval = cifs_revalidate(file->f_path.dentry);
/* Revalidation failed: propagate the error as the seek result. */
544 return (loff_t)retval;
546 return remote_llseek(file, offset, origin);
/* Filesystem registration record ("cifs"); registered in init_cifs. */
549 static struct file_system_type cifs_fs_type = {
550 .owner = THIS_MODULE,
552 .get_sb = cifs_get_sb,
553 .kill_sb = kill_anon_super,
/* Inode operation tables, selected per inode type when inodes are
 * instantiated (directory / regular file / symlink).  All three share
 * cifs_permission and, when CONFIG_CIFS_XATTR is set, the xattr ops. */
556 const struct inode_operations cifs_dir_inode_ops = {
557 .create = cifs_create,
558 .lookup = cifs_lookup,
559 .getattr = cifs_getattr,
560 .unlink = cifs_unlink,
561 .link = cifs_hardlink,
564 .rename = cifs_rename,
565 .permission = cifs_permission,
566 /* revalidate:cifs_revalidate, */
567 .setattr = cifs_setattr,
568 .symlink = cifs_symlink,
570 #ifdef CONFIG_CIFS_XATTR
571 .setxattr = cifs_setxattr,
572 .getxattr = cifs_getxattr,
573 .listxattr = cifs_listxattr,
574 .removexattr = cifs_removexattr,
578 const struct inode_operations cifs_file_inode_ops = {
579 /* revalidate:cifs_revalidate, */
580 .setattr = cifs_setattr,
581 .getattr = cifs_getattr, /* do we need this anymore? */
582 .rename = cifs_rename,
583 .permission = cifs_permission,
584 #ifdef CONFIG_CIFS_XATTR
585 .setxattr = cifs_setxattr,
586 .getxattr = cifs_getxattr,
587 .listxattr = cifs_listxattr,
588 .removexattr = cifs_removexattr,
592 const struct inode_operations cifs_symlink_inode_ops = {
593 .readlink = generic_readlink,
594 .follow_link = cifs_follow_link,
595 .put_link = cifs_put_link,
596 .permission = cifs_permission,
597 /* BB add the following two eventually */
598 /* revalidate: cifs_revalidate,
599 setattr: cifs_notify_change, *//* BB do we need notify change */
600 #ifdef CONFIG_CIFS_XATTR
601 .setxattr = cifs_setxattr,
602 .getxattr = cifs_getxattr,
603 .listxattr = cifs_listxattr,
604 .removexattr = cifs_removexattr,
/* File operation tables.  Four variants of the regular-file ops are
 * selected at open time along two axes: direct I/O mounts (cifs_user_read/
 * write, no mmap/page cache) vs. cached I/O, and servers without byte-range
 * lock support ("nobrl" variants, which presumably omit .lock — the locking
 * entries are elided from this fragment, so confirm against the full file). */
608 const struct file_operations cifs_file_ops = {
609 .read = do_sync_read,
610 .write = do_sync_write,
611 .aio_read = generic_file_aio_read,
612 .aio_write = cifs_file_aio_write,
614 .release = cifs_close,
618 .mmap = cifs_file_mmap,
619 .sendfile = generic_file_sendfile,
620 .llseek = cifs_llseek,
621 #ifdef CONFIG_CIFS_POSIX
623 #endif /* CONFIG_CIFS_POSIX */
625 #ifdef CONFIG_CIFS_EXPERIMENTAL
626 .dir_notify = cifs_dir_notify,
627 #endif /* CONFIG_CIFS_EXPERIMENTAL */
630 const struct file_operations cifs_file_direct_ops = {
631 /* no mmap, no aio, no readv -
632 BB reevaluate whether they can be done with directio, no cache */
633 .read = cifs_user_read,
634 .write = cifs_user_write,
636 .release = cifs_close,
640 .sendfile = generic_file_sendfile, /* BB removeme BB */
641 #ifdef CONFIG_CIFS_POSIX
643 #endif /* CONFIG_CIFS_POSIX */
644 .llseek = cifs_llseek,
645 #ifdef CONFIG_CIFS_EXPERIMENTAL
646 .dir_notify = cifs_dir_notify,
647 #endif /* CONFIG_CIFS_EXPERIMENTAL */
649 const struct file_operations cifs_file_nobrl_ops = {
650 .read = do_sync_read,
651 .write = do_sync_write,
652 .aio_read = generic_file_aio_read,
653 .aio_write = cifs_file_aio_write,
655 .release = cifs_close,
658 .mmap = cifs_file_mmap,
659 .sendfile = generic_file_sendfile,
660 .llseek = cifs_llseek,
661 #ifdef CONFIG_CIFS_POSIX
663 #endif /* CONFIG_CIFS_POSIX */
665 #ifdef CONFIG_CIFS_EXPERIMENTAL
666 .dir_notify = cifs_dir_notify,
667 #endif /* CONFIG_CIFS_EXPERIMENTAL */
670 const struct file_operations cifs_file_direct_nobrl_ops = {
671 /* no mmap, no aio, no readv -
672 BB reevaluate whether they can be done with directio, no cache */
673 .read = cifs_user_read,
674 .write = cifs_user_write,
676 .release = cifs_close,
679 .sendfile = generic_file_sendfile, /* BB removeme BB */
680 #ifdef CONFIG_CIFS_POSIX
682 #endif /* CONFIG_CIFS_POSIX */
683 .llseek = cifs_llseek,
684 #ifdef CONFIG_CIFS_EXPERIMENTAL
685 .dir_notify = cifs_dir_notify,
686 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* Directory ops: readdir is served from the SMB find protocol. */
689 const struct file_operations cifs_dir_ops = {
690 .readdir = cifs_readdir,
691 .release = cifs_closedir,
692 .read = generic_read_dir,
693 #ifdef CONFIG_CIFS_EXPERIMENTAL
694 .dir_notify = cifs_dir_notify,
695 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* Slab constructor for cifs_inode_cachep: run once per slab object, it
 * initializes the embedded VFS inode and the per-inode brlock list.  The
 * SLAB_CTOR_CONSTRUCTOR test is the pre-2.6.22 idiom distinguishing real
 * construction from verification passes. */
700 cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
702 struct cifsInodeInfo *cifsi = inode;
704 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
705 SLAB_CTOR_CONSTRUCTOR) {
706 inode_init_once(&cifsi->vfs_inode);
707 INIT_LIST_HEAD(&cifsi->lockList);
/* Create the cifsInodeInfo slab cache (with the once-per-object
 * constructor above); returns -ENOMEM on failure (return value elided
 * from this fragment).  The destroy helper is its exact inverse. */
712 cifs_init_inodecache(void)
714 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
715 sizeof (struct cifsInodeInfo),
716 0, (SLAB_RECLAIM_ACCOUNT|
718 cifs_init_once, NULL);
719 if (cifs_inode_cachep == NULL)
726 cifs_destroy_inodecache(void)
728 kmem_cache_destroy(cifs_inode_cachep);
/* Create the large- and small-request buffer caches and their mempools.
 * Clamps the CIFSMaxBufSize, cifs_min_rcv and cifs_min_small module
 * parameters into their documented ranges first.  On any failure the
 * already-created caches/pools are torn down before returning (return
 * statements are elided from this fragment). */
732 cifs_init_request_bufs(void)
734 if (CIFSMaxBufSize < 8192) {
735 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
736 Unicode path name has to fit in any SMB/CIFS path based frames */
737 CIFSMaxBufSize = 8192;
738 } else if (CIFSMaxBufSize > 1024*127) {
739 CIFSMaxBufSize = 1024 * 127;
741 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
743 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
/* Large request buffers: CIFSMaxBufSize payload + SMB header each. */
744 cifs_req_cachep = kmem_cache_create("cifs_request",
746 MAX_CIFS_HDR_SIZE, 0,
747 SLAB_HWCACHE_ALIGN, NULL, NULL);
748 if (cifs_req_cachep == NULL)
751 if (cifs_min_rcv < 1)
753 else if (cifs_min_rcv > 64) {
755 cERROR(1,("cifs_min_rcv set to maximum (64)"));
/* Mempool guarantees cifs_min_rcv large buffers even under memory
   pressure, so receives can always make forward progress. */
758 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
761 if (cifs_req_poolp == NULL) {
762 kmem_cache_destroy(cifs_req_cachep);
765 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
766 almost all handle based requests (but not write response, nor is it
767 sufficient for path based requests). A smaller size would have
768 been more efficient (compacting multiple slab items on one 4k page)
769 for the case in which debug was on, but this larger size allows
770 more SMBs to use small buffer alloc and is still much more
771 efficient to alloc 1 per page off the slab compared to 17K (5page)
772 alloc of large cifs buffers even when page debugging is on */
773 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
774 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
776 if (cifs_sm_req_cachep == NULL) {
777 mempool_destroy(cifs_req_poolp);
778 kmem_cache_destroy(cifs_req_cachep);
782 if (cifs_min_small < 2)
784 else if (cifs_min_small > 256) {
785 cifs_min_small = 256;
786 cFYI(1,("cifs_min_small set to maximum (256)"));
789 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
792 if (cifs_sm_req_poolp == NULL) {
793 mempool_destroy(cifs_req_poolp);
794 kmem_cache_destroy(cifs_req_cachep);
795 kmem_cache_destroy(cifs_sm_req_cachep);
/* Teardown helper: exact inverse of the init above (pools before caches). */
803 cifs_destroy_request_bufs(void)
805 mempool_destroy(cifs_req_poolp);
806 kmem_cache_destroy(cifs_req_cachep);
807 mempool_destroy(cifs_sm_req_poolp);
808 kmem_cache_destroy(cifs_sm_req_cachep);
/* (cifs_init_mids, opening lines elided.)  Create the multiplex-id (mid)
 * cache + mempool used to track in-flight SMB requests, and the oplock
 * queue-entry cache.  Each failure path unwinds what was created before it. */
814 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
815 sizeof (struct mid_q_entry), 0,
816 SLAB_HWCACHE_ALIGN, NULL, NULL);
817 if (cifs_mid_cachep == NULL)
820 /* 3 is a reasonable minimum number of simultaneous operations */
821 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
822 if (cifs_mid_poolp == NULL) {
823 kmem_cache_destroy(cifs_mid_cachep);
827 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
828 sizeof (struct oplock_q_entry), 0,
829 SLAB_HWCACHE_ALIGN, NULL, NULL);
830 if (cifs_oplock_cachep == NULL) {
831 kmem_cache_destroy(cifs_mid_cachep);
832 mempool_destroy(cifs_mid_poolp);
/* Teardown helper: inverse of cifs_init_mids. */
840 cifs_destroy_mids(void)
842 mempool_destroy(cifs_mid_poolp);
843 kmem_cache_destroy(cifs_mid_cachep);
844 kmem_cache_destroy(cifs_oplock_cachep);
/* Background kthread ("cifsoplockd"): drains GlobalOplock_Q.  For each
 * queued oplock break it flushes (and, if read caching was also lost,
 * invalidates) the inode's page cache, then acknowledges the break to the
 * server via a LOCKING_ANDX_OPLOCK_RELEASE.  Sleeps ~39s when the queue is
 * empty; exits when kthread_stop() is called.  Fragment — the outer loop
 * header and several braces are elided; the lock ordering (GlobalMid_Lock
 * dropped before any blocking work) is deliberate, do not reorder. */
847 static int cifs_oplock_thread(void * dummyarg)
849 struct oplock_q_entry * oplock_item;
850 struct cifsTconInfo *pTcon;
851 struct inode * inode;
859 spin_lock(&GlobalMid_Lock);
860 if (list_empty(&GlobalOplock_Q)) {
861 spin_unlock(&GlobalMid_Lock);
862 set_current_state(TASK_INTERRUPTIBLE);
863 schedule_timeout(39*HZ);
865 oplock_item = list_entry(GlobalOplock_Q.next,
866 struct oplock_q_entry, qhead);
868 cFYI(1,("found oplock item to write out"));
869 pTcon = oplock_item->tcon;
870 inode = oplock_item->pinode;
871 netfid = oplock_item->netfid;
/* Copy out what we need, then drop the spinlock before sleeping work. */
872 spin_unlock(&GlobalMid_Lock);
873 DeleteOplockQEntry(oplock_item);
874 /* can not grab inode sem here since it would
875 deadlock when oplock received on delete
876 since vfs_unlink holds the i_mutex across
878 /* mutex_lock(&inode->i_mutex);*/
879 if (S_ISREG(inode->i_mode)) {
880 rc = filemap_fdatawrite(inode->i_mapping);
881 if (CIFS_I(inode)->clientCanCacheRead == 0) {
882 filemap_fdatawait(inode->i_mapping);
883 invalidate_remote_inode(inode);
887 /* mutex_unlock(&inode->i_mutex);*/
/* Stash the writeback error so a later fsync/close can report it. */
889 CIFS_I(inode)->write_behind_rc = rc;
890 cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
892 /* releasing a stale oplock after recent reconnection
893 of smb session using a now incorrect file
894 handle is not a data integrity issue but do
895 not bother sending an oplock release if session
896 to server still is disconnected since oplock
897 already released by the server in that case */
898 if (pTcon->tidStatus != CifsNeedReconnect) {
899 rc = CIFSSMBLock(0, pTcon, netfid,
900 0 /* len */ , 0 /* offset */, 0,
901 0, LOCKING_ANDX_OPLOCK_RELEASE,
903 cFYI(1,("Oplock release rc = %d ",rc));
906 spin_unlock(&GlobalMid_Lock);
907 set_current_state(TASK_INTERRUPTIBLE);
908 schedule_timeout(1); /* yield in case q were corrupt */
910 } while (!kthread_should_stop());
/* Background kthread ("cifsdnotifyd"): every 15 seconds, walks the global
 * session list and wakes all waiters on each server's response queue so
 * that requests stuck behind a dead connection can notice and error out.
 * Fragment — the loop opening is elided; terminates on kthread_stop(). */
915 static int cifs_dnotify_thread(void * dummyarg)
917 struct list_head *tmp;
918 struct cifsSesInfo *ses;
923 set_current_state(TASK_INTERRUPTIBLE);
924 schedule_timeout(15*HZ);
925 read_lock(&GlobalSMBSeslock);
926 /* check if any stuck requests that need
927 to be woken up and wakeq so the
928 thread can wake up and error out */
929 list_for_each(tmp, &GlobalSMBSessionList) {
930 ses = list_entry(tmp, struct cifsSesInfo,
/* Only bother waking queues with requests actually in flight. */
932 if (ses && ses->server &&
933 atomic_read(&ses->server->inFlight))
934 wake_up_all(&ses->server->response_q);
936 read_unlock(&GlobalSMBSeslock);
937 } while (!kthread_should_stop());
/* (init_cifs, opening lines elided.)  Module init: initialize global
 * lists, counters and locks, clamp cifs_max_pending, create the slab
 * caches/mempools, register the filesystem, and start the two background
 * kthreads.  Cleanup on failure is the standard goto-unwind ladder at the
 * bottom, in exact reverse order of construction. */
946 #ifdef CONFIG_PROC_FS
949 /* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */
950 INIT_LIST_HEAD(&GlobalSMBSessionList);
951 INIT_LIST_HEAD(&GlobalTreeConnectionList);
952 INIT_LIST_HEAD(&GlobalOplock_Q);
953 #ifdef CONFIG_CIFS_EXPERIMENTAL
954 INIT_LIST_HEAD(&GlobalDnotifyReqList);
955 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
958 * Initialize Global counters
960 atomic_set(&sesInfoAllocCount, 0);
961 atomic_set(&tconInfoAllocCount, 0);
962 atomic_set(&tcpSesAllocCount,0);
963 atomic_set(&tcpSesReconnectCount, 0);
964 atomic_set(&tconInfoReconnectCount, 0);
966 atomic_set(&bufAllocCount, 0);
967 atomic_set(&smBufAllocCount, 0);
968 #ifdef CONFIG_CIFS_STATS2
969 atomic_set(&totBufAllocCount, 0);
970 atomic_set(&totSmBufAllocCount, 0);
971 #endif /* CONFIG_CIFS_STATS2 */
973 atomic_set(&midCount, 0);
974 GlobalCurrentXid = 0;
975 GlobalTotalActiveXid = 0;
976 GlobalMaxActiveXid = 0;
977 memset(Local_System_Name, 0, 15);
978 rwlock_init(&GlobalSMBSeslock);
979 spin_lock_init(&GlobalMid_Lock);
/* Clamp the cifs_max_pending module parameter into its valid range. */
981 if (cifs_max_pending < 2) {
982 cifs_max_pending = 2;
983 cFYI(1,("cifs_max_pending set to min of 2"));
984 } else if (cifs_max_pending > 256) {
985 cifs_max_pending = 256;
986 cFYI(1,("cifs_max_pending set to max of 256"));
989 rc = cifs_init_inodecache();
993 rc = cifs_init_mids();
995 goto out_destroy_inodecache;
997 rc = cifs_init_request_bufs();
999 goto out_destroy_mids;
1001 rc = register_filesystem(&cifs_fs_type);
1003 goto out_destroy_request_bufs;
1005 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
1006 if (IS_ERR(oplockThread)) {
1007 rc = PTR_ERR(oplockThread);
1008 cERROR(1,("error %d create oplock thread", rc));
1009 goto out_unregister_filesystem;
1012 dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
1013 if (IS_ERR(dnotifyThread)) {
1014 rc = PTR_ERR(dnotifyThread);
1015 cERROR(1,("error %d create dnotify thread", rc));
1016 goto out_stop_oplock_thread;
/* Error unwind: strictly reverse order of the init sequence above. */
1021 out_stop_oplock_thread:
1022 kthread_stop(oplockThread);
1023 out_unregister_filesystem:
1024 unregister_filesystem(&cifs_fs_type);
1025 out_destroy_request_bufs:
1026 cifs_destroy_request_bufs();
1028 cifs_destroy_mids();
1029 out_destroy_inodecache:
1030 cifs_destroy_inodecache();
1032 #ifdef CONFIG_PROC_FS
/* (exit_cifs, opening line elided.)  Module unload: unregister the
 * filesystem, free caches/pools, and stop both background kthreads —
 * the mirror image of init_cifs. */
1041 cFYI(0, ("In unregister ie exit_cifs"));
1042 #ifdef CONFIG_PROC_FS
1045 unregister_filesystem(&cifs_fs_type);
1046 cifs_destroy_inodecache();
1047 cifs_destroy_mids();
1048 cifs_destroy_request_bufs();
1049 kthread_stop(oplockThread);
1050 kthread_stop(dnotifyThread);
/* Module metadata and init/exit entry points. */
1053 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1054 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1056 ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
1057 MODULE_VERSION(CIFS_VERSION);
1058 module_init(init_cifs)
1059 module_exit(exit_cifs)