cifs: fix buffer format byte on NT Rename/hardlink
[linux-2.6-block.git] / fs / cifs / file.c
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2007
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
25#include <linux/backing-dev.h>
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/writeback.h>
31#include <linux/task_io_accounting_ops.h>
32#include <linux/delay.h>
33#include <asm/div64.h>
34#include "cifsfs.h"
35#include "cifspdu.h"
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_unicode.h"
39#include "cifs_debug.h"
40#include "cifs_fs_sb.h"
41
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 mutex_init(&private_data->lock_mutex);
51 INIT_LIST_HEAD(&private_data->llist);
52 private_data->pfile = file; /* needed for writepage */
53 private_data->pInode = inode;
54 private_data->invalidHandle = false;
55 private_data->closePend = false;
56 /* we have to track num writers to the inode, since writepages
57 does not tell us which handle the write is for so there can
58 be a close (overlapping with write) of the filehandle that
59 cifs_writepages chose to use */
60 atomic_set(&private_data->wrtPending, 0);
61
62 return private_data;
63}
64
65static inline int cifs_convert_flags(unsigned int flags)
66{
67 if ((flags & O_ACCMODE) == O_RDONLY)
68 return GENERIC_READ;
69 else if ((flags & O_ACCMODE) == O_WRONLY)
70 return GENERIC_WRITE;
71 else if ((flags & O_ACCMODE) == O_RDWR) {
72 /* GENERIC_ALL is too much permission to request
73 can cause unnecessary access denied on create */
74 /* return GENERIC_ALL; */
75 return (GENERIC_READ | GENERIC_WRITE);
76 }
77
78 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
79 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
80 FILE_READ_DATA);
81}
82
83static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
84{
85 fmode_t posix_flags = 0;
86
87 if ((flags & O_ACCMODE) == O_RDONLY)
88 posix_flags = FMODE_READ;
89 else if ((flags & O_ACCMODE) == O_WRONLY)
90 posix_flags = FMODE_WRITE;
91 else if ((flags & O_ACCMODE) == O_RDWR) {
92 /* GENERIC_ALL is too much permission to request
93 can cause unnecessary access denied on create */
94 /* return GENERIC_ALL; */
95 posix_flags = FMODE_READ | FMODE_WRITE;
96 }
97 /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
98 reopening a file. They had their effect on the original open */
99 if (flags & O_APPEND)
100 posix_flags |= (fmode_t)O_APPEND;
101 if (flags & O_SYNC)
102 posix_flags |= (fmode_t)O_SYNC;
103 if (flags & O_DIRECTORY)
104 posix_flags |= (fmode_t)O_DIRECTORY;
105 if (flags & O_NOFOLLOW)
106 posix_flags |= (fmode_t)O_NOFOLLOW;
107 if (flags & O_DIRECT)
108 posix_flags |= (fmode_t)O_DIRECT;
109
110 return posix_flags;
111}
112
113static inline int cifs_get_disposition(unsigned int flags)
114{
115 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
116 return FILE_CREATE;
117 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
118 return FILE_OVERWRITE_IF;
119 else if ((flags & O_CREAT) == O_CREAT)
120 return FILE_OPEN_IF;
121 else if ((flags & O_TRUNC) == O_TRUNC)
122 return FILE_OVERWRITE;
123 else
124 return FILE_OPEN;
125}
126
127/* all arguments to this function must be checked for validity in caller */
128static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
129 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
130 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
131 char *full_path, int xid)
132{
133 struct timespec temp;
134 int rc;
135
136 /* want handles we can use to read with first
137 in the list so we do not have to walk the
138 list to search for one in write_begin */
139 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
140 list_add_tail(&pCifsFile->flist,
141 &pCifsInode->openFileList);
142 } else {
143 list_add(&pCifsFile->flist,
144 &pCifsInode->openFileList);
145 }
146 write_unlock(&GlobalSMBSeslock);
147 if (pCifsInode->clientCanCacheRead) {
148 /* we have the inode open somewhere else
149 no need to discard cache data */
150 goto client_can_cache;
151 }
152
153 /* BB need same check in cifs_create too? */
154 /* if not oplocked, invalidate inode pages if mtime or file
155 size changed */
156 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
157 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
158 (file->f_path.dentry->d_inode->i_size ==
159 (loff_t)le64_to_cpu(buf->EndOfFile))) {
160 cFYI(1, ("inode unchanged on server"));
161 } else {
162 if (file->f_path.dentry->d_inode->i_mapping) {
163 /* BB no need to lock inode until after invalidate
164 since namei code should already have it locked? */
165 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
166 if (rc != 0)
167 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
168 }
169 cFYI(1, ("invalidating remote inode since open detected it "
170 "changed"));
171 invalidate_remote_inode(file->f_path.dentry->d_inode);
172 }
173
174client_can_cache:
175 if (pTcon->unix_ext)
176 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
177 full_path, inode->i_sb, xid);
178 else
179 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
180 full_path, buf, inode->i_sb, xid, NULL);
181
182 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
183 pCifsInode->clientCanCacheAll = true;
184 pCifsInode->clientCanCacheRead = true;
185 cFYI(1, ("Exclusive Oplock granted on inode %p",
186 file->f_path.dentry->d_inode));
187 } else if ((*oplock & 0xF) == OPLOCK_READ)
188 pCifsInode->clientCanCacheRead = true;
189
190 return rc;
191}
192
193int cifs_open(struct inode *inode, struct file *file)
194{
195 int rc = -EACCES;
196 int xid, oplock;
197 struct cifs_sb_info *cifs_sb;
198 struct cifsTconInfo *pTcon;
199 struct cifsFileInfo *pCifsFile;
200 struct cifsInodeInfo *pCifsInode;
201 struct list_head *tmp;
202 char *full_path = NULL;
203 int desiredAccess;
204 int disposition;
205 __u16 netfid;
206 FILE_ALL_INFO *buf = NULL;
207
208 xid = GetXid();
209
210 cifs_sb = CIFS_SB(inode->i_sb);
211 pTcon = cifs_sb->tcon;
212
213 if (file->f_flags & O_CREAT) {
214 /* search inode for this file and fill in file->private_data */
215 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
216 read_lock(&GlobalSMBSeslock);
217 list_for_each(tmp, &pCifsInode->openFileList) {
218 pCifsFile = list_entry(tmp, struct cifsFileInfo,
219 flist);
220 if ((pCifsFile->pfile == NULL) &&
221 (pCifsFile->pid == current->tgid)) {
222 /* mode set in cifs_create */
223
224 /* needed for writepage */
225 pCifsFile->pfile = file;
226
227 file->private_data = pCifsFile;
228 break;
229 }
230 }
231 read_unlock(&GlobalSMBSeslock);
232 if (file->private_data != NULL) {
233 rc = 0;
234 FreeXid(xid);
235 return rc;
236 } else {
237 if (file->f_flags & O_EXCL)
238 cERROR(1, ("could not find file instance for "
239 "new file %p", file));
240 }
241 }
242
243 full_path = build_path_from_dentry(file->f_path.dentry);
244 if (full_path == NULL) {
245 FreeXid(xid);
246 return -ENOMEM;
247 }
248
249 cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
250 inode, file->f_flags, full_path));
251 desiredAccess = cifs_convert_flags(file->f_flags);
252
253/*********************************************************************
254 * open flag mapping table:
255 *
256 * POSIX Flag CIFS Disposition
257 * ---------- ----------------
258 * O_CREAT FILE_OPEN_IF
259 * O_CREAT | O_EXCL FILE_CREATE
260 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
261 * O_TRUNC FILE_OVERWRITE
262 * none of the above FILE_OPEN
263 *
264 * Note that there is not a direct match between disposition
265 * FILE_SUPERSEDE (ie create whether or not file exists although
266 * O_CREAT | O_TRUNC is similar but truncates the existing
267 * file rather than creating a new file as FILE_SUPERSEDE does
268 * (which uses the attributes / metadata passed in on open call)
269 *?
270 *? O_SYNC is a reasonable match to CIFS writethrough flag
271 *? and the read write flags match reasonably. O_LARGEFILE
272 *? is irrelevant because largefile support is always used
273 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
274 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
275 *********************************************************************/
276
277 disposition = cifs_get_disposition(file->f_flags);
278
279 if (oplockEnabled)
280 oplock = REQ_OPLOCK;
281 else
282 oplock = 0;
283
284 /* BB pass O_SYNC flag through on file attributes .. BB */
285
286 /* Also refresh inode by passing in file_info buf returned by SMBOpen
287 and calling get_inode_info with returned buf (at least helps
288 non-Unix server case) */
289
290 /* BB we can not do this if this is the second open of a file
291 and the first handle has writebehind data, we might be
292 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
293 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
294 if (!buf) {
295 rc = -ENOMEM;
296 goto out;
297 }
298
299 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
300 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
301 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
302 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
303 & CIFS_MOUNT_MAP_SPECIAL_CHR);
304 else
305 rc = -EIO; /* no NT SMB support fall into legacy open below */
306
307 if (rc == -EIO) {
308 /* Old server, try legacy style OpenX */
309 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
310 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
311 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
312 & CIFS_MOUNT_MAP_SPECIAL_CHR);
313 }
314 if (rc) {
315 cFYI(1, ("cifs_open returned 0x%x", rc));
316 goto out;
317 }
318 file->private_data =
319 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
320 if (file->private_data == NULL) {
321 rc = -ENOMEM;
322 goto out;
323 }
324 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
325 write_lock(&GlobalSMBSeslock);
326 list_add(&pCifsFile->tlist, &pTcon->openFileList);
327
328 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
329 if (pCifsInode) {
330 rc = cifs_open_inode_helper(inode, file, pCifsInode,
331 pCifsFile, pTcon,
332 &oplock, buf, full_path, xid);
333 } else {
334 write_unlock(&GlobalSMBSeslock);
335 }
336
337 if (oplock & CIFS_CREATE_ACTION) {
338 /* time to set mode which we can not set earlier due to
339 problems creating new read-only files */
340 if (pTcon->unix_ext) {
341 struct cifs_unix_set_info_args args = {
342 .mode = inode->i_mode,
343 .uid = NO_CHANGE_64,
344 .gid = NO_CHANGE_64,
345 .ctime = NO_CHANGE_64,
346 .atime = NO_CHANGE_64,
347 .mtime = NO_CHANGE_64,
348 .device = 0,
349 };
350 CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args,
351 cifs_sb->local_nls,
352 cifs_sb->mnt_cifs_flags &
353 CIFS_MOUNT_MAP_SPECIAL_CHR);
354 }
355 }
356
357out:
358 kfree(buf);
359 kfree(full_path);
360 FreeXid(xid);
361 return rc;
362}
363
364/* Try to reacquire byte range locks that were released when session */
365/* to server was lost */
366static int cifs_relock_file(struct cifsFileInfo *cifsFile)
367{
368 int rc = 0;
369
370/* BB list all locks open on this file and relock */
371
372 return rc;
373}
374
375static int cifs_reopen_file(struct file *file, bool can_flush)
376{
377 int rc = -EACCES;
378 int xid, oplock;
379 struct cifs_sb_info *cifs_sb;
380 struct cifsTconInfo *tcon;
381 struct cifsFileInfo *pCifsFile;
382 struct cifsInodeInfo *pCifsInode;
383 struct inode *inode;
384 char *full_path = NULL;
385 int desiredAccess;
386 int disposition = FILE_OPEN;
387 __u16 netfid;
388
389 if (file->private_data)
390 pCifsFile = (struct cifsFileInfo *)file->private_data;
391 else
392 return -EBADF;
393
394 xid = GetXid();
395 down(&pCifsFile->fh_sem);
396 if (!pCifsFile->invalidHandle) {
397 up(&pCifsFile->fh_sem);
398 FreeXid(xid);
399 return 0;
400 }
401
402 if (file->f_path.dentry == NULL) {
403 cERROR(1, ("no valid name if dentry freed"));
404 dump_stack();
405 rc = -EBADF;
406 goto reopen_error_exit;
407 }
408
409 inode = file->f_path.dentry->d_inode;
410 if (inode == NULL) {
411 cERROR(1, ("inode not valid"));
412 dump_stack();
413 rc = -EBADF;
414 goto reopen_error_exit;
415 }
416
417 cifs_sb = CIFS_SB(inode->i_sb);
418 tcon = cifs_sb->tcon;
419
420/* can not grab rename sem here because various ops, including
421 those that already have the rename sem can end up causing writepage
422 to get called and if the server was down that means we end up here,
423 and we can never tell if the caller already has the rename_sem */
424 full_path = build_path_from_dentry(file->f_path.dentry);
425 if (full_path == NULL) {
426 rc = -ENOMEM;
427reopen_error_exit:
428 up(&pCifsFile->fh_sem);
429 FreeXid(xid);
430 return rc;
431 }
432
433 cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
434 inode, file->f_flags, full_path));
435
436 if (oplockEnabled)
437 oplock = REQ_OPLOCK;
438 else
439 oplock = 0;
440
441 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
442 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
443 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
444 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
445 /* can not refresh inode info since size could be stale */
446 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
447 cifs_sb->mnt_file_mode /* ignored */,
448 oflags, &oplock, &netfid, xid);
449 if (rc == 0) {
450 cFYI(1, ("posix reopen succeeded"));
451 goto reopen_success;
452 }
453 /* fallthrough to retry open the old way on errors, especially
454 in the reconnect path it is important to retry hard */
455 }
456
457 desiredAccess = cifs_convert_flags(file->f_flags);
458
459 /* Can not refresh inode by passing in file_info buf to be returned
460 by SMBOpen and then calling get_inode_info with returned buf
461 since file might have write behind data that needs to be flushed
462 and server version of file size can be stale. If we knew for sure
463 that inode was not dirty locally we could do this */
464
465 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
466 CREATE_NOT_DIR, &netfid, &oplock, NULL,
467 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
468 CIFS_MOUNT_MAP_SPECIAL_CHR);
469 if (rc) {
470 up(&pCifsFile->fh_sem);
471 cFYI(1, ("cifs_open returned 0x%x", rc));
472 cFYI(1, ("oplock: %d", oplock));
473 } else {
474reopen_success:
475 pCifsFile->netfid = netfid;
476 pCifsFile->invalidHandle = false;
477 up(&pCifsFile->fh_sem);
478 pCifsInode = CIFS_I(inode);
479 if (pCifsInode) {
480 if (can_flush) {
481 rc = filemap_write_and_wait(inode->i_mapping);
482 if (rc != 0)
483 CIFS_I(inode)->write_behind_rc = rc;
484 /* temporarily disable caching while we
485 go to server to get inode info */
486 pCifsInode->clientCanCacheAll = false;
487 pCifsInode->clientCanCacheRead = false;
488 if (tcon->unix_ext)
489 rc = cifs_get_inode_info_unix(&inode,
490 full_path, inode->i_sb, xid);
491 else
492 rc = cifs_get_inode_info(&inode,
493 full_path, NULL, inode->i_sb,
494 xid, NULL);
495 } /* else we are writing out data to server already
496 and could deadlock if we tried to flush data, and
497 since we do not know if we have data that would
498 invalidate the current end of file on the server
499 we can not go to the server to get the new inod
500 info */
501 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
502 pCifsInode->clientCanCacheAll = true;
503 pCifsInode->clientCanCacheRead = true;
504 cFYI(1, ("Exclusive Oplock granted on inode %p",
505 file->f_path.dentry->d_inode));
506 } else if ((oplock & 0xF) == OPLOCK_READ) {
507 pCifsInode->clientCanCacheRead = true;
508 pCifsInode->clientCanCacheAll = false;
509 } else {
510 pCifsInode->clientCanCacheRead = false;
511 pCifsInode->clientCanCacheAll = false;
512 }
513 cifs_relock_file(pCifsFile);
514 }
515 }
516 kfree(full_path);
517 FreeXid(xid);
518 return rc;
519}
520
521int cifs_close(struct inode *inode, struct file *file)
522{
523 int rc = 0;
524 int xid, timeout;
525 struct cifs_sb_info *cifs_sb;
526 struct cifsTconInfo *pTcon;
527 struct cifsFileInfo *pSMBFile =
528 (struct cifsFileInfo *)file->private_data;
529
530 xid = GetXid();
531
532 cifs_sb = CIFS_SB(inode->i_sb);
533 pTcon = cifs_sb->tcon;
534 if (pSMBFile) {
535 struct cifsLockInfo *li, *tmp;
536 write_lock(&GlobalSMBSeslock);
537 pSMBFile->closePend = true;
538 if (pTcon) {
539 /* no sense reconnecting to close a file that is
540 already closed */
541 if (!pTcon->need_reconnect) {
542 write_unlock(&GlobalSMBSeslock);
543 timeout = 2;
544 while ((atomic_read(&pSMBFile->wrtPending) != 0)
545 && (timeout <= 2048)) {
546 /* Give write a better chance to get to
547 server ahead of the close. We do not
548 want to add a wait_q here as it would
549 increase the memory utilization as
550 the struct would be in each open file,
551 but this should give enough time to
552 clear the socket */
553 cFYI(DBG2,
554 ("close delay, write pending"));
555 msleep(timeout);
556 timeout *= 4;
557 }
558 if (atomic_read(&pSMBFile->wrtPending))
559 cERROR(1, ("close with pending write"));
560 if (!pTcon->need_reconnect &&
561 !pSMBFile->invalidHandle)
562 rc = CIFSSMBClose(xid, pTcon,
563 pSMBFile->netfid);
564 } else
565 write_unlock(&GlobalSMBSeslock);
566 } else
567 write_unlock(&GlobalSMBSeslock);
568
569 /* Delete any outstanding lock records.
570 We'll lose them when the file is closed anyway. */
571 mutex_lock(&pSMBFile->lock_mutex);
572 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
573 list_del(&li->llist);
574 kfree(li);
575 }
576 mutex_unlock(&pSMBFile->lock_mutex);
577
578 write_lock(&GlobalSMBSeslock);
579 list_del(&pSMBFile->flist);
580 list_del(&pSMBFile->tlist);
581 write_unlock(&GlobalSMBSeslock);
582 timeout = 10;
583 /* We waited above to give the SMBWrite a chance to issue
584 on the wire (so we do not get SMBWrite returning EBADF
585 if writepages is racing with close. Note that writepages
586 does not specify a file handle, so it is possible for a file
587 to be opened twice, and the application close the "wrong"
588 file handle - in these cases we delay long enough to allow
589 the SMBWrite to get on the wire before the SMB Close.
590 We allow total wait here over 45 seconds, more than
591 oplock break time, and more than enough to allow any write
592 to complete on the server, or to time out on the client */
593 while ((atomic_read(&pSMBFile->wrtPending) != 0)
594 && (timeout <= 50000)) {
595 cERROR(1, ("writes pending, delay free of handle"));
596 msleep(timeout);
597 timeout *= 8;
598 }
599 kfree(file->private_data);
600 file->private_data = NULL;
601 } else
602 rc = -EBADF;
603
604 read_lock(&GlobalSMBSeslock);
605 if (list_empty(&(CIFS_I(inode)->openFileList))) {
606 cFYI(1, ("closing last open instance for inode %p", inode));
607 /* if the file is not open we do not know if we can cache info
608 on this inode, much less write behind and read ahead */
609 CIFS_I(inode)->clientCanCacheRead = false;
610 CIFS_I(inode)->clientCanCacheAll = false;
611 }
612 read_unlock(&GlobalSMBSeslock);
613 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
614 rc = CIFS_I(inode)->write_behind_rc;
615 FreeXid(xid);
616 return rc;
617}
618
619int cifs_closedir(struct inode *inode, struct file *file)
620{
621 int rc = 0;
622 int xid;
623 struct cifsFileInfo *pCFileStruct =
624 (struct cifsFileInfo *)file->private_data;
625 char *ptmp;
626
627 cFYI(1, ("Closedir inode = 0x%p", inode));
628
629 xid = GetXid();
630
631 if (pCFileStruct) {
632 struct cifsTconInfo *pTcon;
633 struct cifs_sb_info *cifs_sb =
634 CIFS_SB(file->f_path.dentry->d_sb);
635
636 pTcon = cifs_sb->tcon;
637
638 cFYI(1, ("Freeing private data in close dir"));
639 write_lock(&GlobalSMBSeslock);
640 if (!pCFileStruct->srch_inf.endOfSearch &&
641 !pCFileStruct->invalidHandle) {
642 pCFileStruct->invalidHandle = true;
643 write_unlock(&GlobalSMBSeslock);
644 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
645 cFYI(1, ("Closing uncompleted readdir with rc %d",
646 rc));
647 /* not much we can do if it fails anyway, ignore rc */
648 rc = 0;
649 } else
650 write_unlock(&GlobalSMBSeslock);
651 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
652 if (ptmp) {
653 cFYI(1, ("closedir free smb buf in srch struct"));
654 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
655 if (pCFileStruct->srch_inf.smallBuf)
656 cifs_small_buf_release(ptmp);
657 else
658 cifs_buf_release(ptmp);
659 }
660 kfree(file->private_data);
661 file->private_data = NULL;
662 }
663 /* BB can we lock the filestruct while this is going on? */
664 FreeXid(xid);
665 return rc;
666}
667
668static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
669 __u64 offset, __u8 lockType)
670{
671 struct cifsLockInfo *li =
672 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
673 if (li == NULL)
674 return -ENOMEM;
675 li->offset = offset;
676 li->length = len;
677 li->type = lockType;
678 mutex_lock(&fid->lock_mutex);
679 list_add(&li->llist, &fid->llist);
680 mutex_unlock(&fid->lock_mutex);
681 return 0;
682}
683
684int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
685{
686 int rc, xid;
687 __u32 numLock = 0;
688 __u32 numUnlock = 0;
689 __u64 length;
690 bool wait_flag = false;
691 struct cifs_sb_info *cifs_sb;
692 struct cifsTconInfo *tcon;
693 __u16 netfid;
694 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
695 bool posix_locking = 0;
696
697 length = 1 + pfLock->fl_end - pfLock->fl_start;
698 rc = -EACCES;
699 xid = GetXid();
700
701 cFYI(1, ("Lock parm: 0x%x flockflags: "
702 "0x%x flocktype: 0x%x start: %lld end: %lld",
703 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
704 pfLock->fl_end));
705
706 if (pfLock->fl_flags & FL_POSIX)
707 cFYI(1, ("Posix"));
708 if (pfLock->fl_flags & FL_FLOCK)
709 cFYI(1, ("Flock"));
710 if (pfLock->fl_flags & FL_SLEEP) {
711 cFYI(1, ("Blocking lock"));
712 wait_flag = true;
713 }
714 if (pfLock->fl_flags & FL_ACCESS)
715 cFYI(1, ("Process suspended by mandatory locking - "
716 "not implemented yet"));
717 if (pfLock->fl_flags & FL_LEASE)
718 cFYI(1, ("Lease on file - not implemented yet"));
719 if (pfLock->fl_flags &
720 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
721 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
722
723 if (pfLock->fl_type == F_WRLCK) {
724 cFYI(1, ("F_WRLCK "));
725 numLock = 1;
726 } else if (pfLock->fl_type == F_UNLCK) {
727 cFYI(1, ("F_UNLCK"));
728 numUnlock = 1;
729 /* Check if unlock includes more than
730 one lock range */
731 } else if (pfLock->fl_type == F_RDLCK) {
732 cFYI(1, ("F_RDLCK"));
733 lockType |= LOCKING_ANDX_SHARED_LOCK;
734 numLock = 1;
735 } else if (pfLock->fl_type == F_EXLCK) {
736 cFYI(1, ("F_EXLCK"));
737 numLock = 1;
738 } else if (pfLock->fl_type == F_SHLCK) {
739 cFYI(1, ("F_SHLCK"));
740 lockType |= LOCKING_ANDX_SHARED_LOCK;
741 numLock = 1;
742 } else
743 cFYI(1, ("Unknown type of lock"));
744
745 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
746 tcon = cifs_sb->tcon;
747
748 if (file->private_data == NULL) {
749 FreeXid(xid);
750 return -EBADF;
751 }
752 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
753
754 if ((tcon->ses->capabilities & CAP_UNIX) &&
755 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
756 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
757 posix_locking = 1;
758 /* BB add code here to normalize offset and length to
759 account for negative length which we can not accept over the
760 wire */
761 if (IS_GETLK(cmd)) {
762 if (posix_locking) {
763 int posix_lock_type;
764 if (lockType & LOCKING_ANDX_SHARED_LOCK)
765 posix_lock_type = CIFS_RDLCK;
766 else
767 posix_lock_type = CIFS_WRLCK;
768 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
769 length, pfLock,
770 posix_lock_type, wait_flag);
771 FreeXid(xid);
772 return rc;
773 }
774
775 /* BB we could chain these into one lock request BB */
776 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
777 0, 1, lockType, 0 /* wait flag */ );
778 if (rc == 0) {
779 rc = CIFSSMBLock(xid, tcon, netfid, length,
780 pfLock->fl_start, 1 /* numUnlock */ ,
781 0 /* numLock */ , lockType,
782 0 /* wait flag */ );
783 pfLock->fl_type = F_UNLCK;
784 if (rc != 0)
785 cERROR(1, ("Error unlocking previously locked "
786 "range %d during test of lock", rc));
787 rc = 0;
788
789 } else {
790 /* if rc == ERR_SHARING_VIOLATION ? */
791 rc = 0; /* do not change lock type to unlock
792 since range in use */
793 }
794
795 FreeXid(xid);
796 return rc;
797 }
798
799 if (!numLock && !numUnlock) {
800 /* if no lock or unlock then nothing
801 to do since we do not know what it is */
802 FreeXid(xid);
803 return -EOPNOTSUPP;
804 }
805
806 if (posix_locking) {
807 int posix_lock_type;
808 if (lockType & LOCKING_ANDX_SHARED_LOCK)
809 posix_lock_type = CIFS_RDLCK;
810 else
811 posix_lock_type = CIFS_WRLCK;
812
813 if (numUnlock == 1)
814 posix_lock_type = CIFS_UNLCK;
815
816 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
817 length, pfLock,
818 posix_lock_type, wait_flag);
819 } else {
820 struct cifsFileInfo *fid =
821 (struct cifsFileInfo *)file->private_data;
822
823 if (numLock) {
824 rc = CIFSSMBLock(xid, tcon, netfid, length,
825 pfLock->fl_start,
826 0, numLock, lockType, wait_flag);
827
828 if (rc == 0) {
829 /* For Windows locks we must store them. */
830 rc = store_file_lock(fid, length,
831 pfLock->fl_start, lockType);
832 }
833 } else if (numUnlock) {
834 /* For each stored lock that this unlock overlaps
835 completely, unlock it. */
836 int stored_rc = 0;
837 struct cifsLockInfo *li, *tmp;
838
839 rc = 0;
840 mutex_lock(&fid->lock_mutex);
841 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
842 if (pfLock->fl_start <= li->offset &&
843 (pfLock->fl_start + length) >=
844 (li->offset + li->length)) {
845 stored_rc = CIFSSMBLock(xid, tcon,
846 netfid,
847 li->length, li->offset,
848 1, 0, li->type, false);
849 if (stored_rc)
850 rc = stored_rc;
851
852 list_del(&li->llist);
853 kfree(li);
854 }
855 }
856 mutex_unlock(&fid->lock_mutex);
857 }
858 }
859
860 if (pfLock->fl_flags & FL_POSIX)
861 posix_lock_file_wait(file, pfLock);
862 FreeXid(xid);
863 return rc;
864}
865
866ssize_t cifs_user_write(struct file *file, const char __user *write_data,
867 size_t write_size, loff_t *poffset)
868{
869 int rc = 0;
870 unsigned int bytes_written = 0;
871 unsigned int total_written;
872 struct cifs_sb_info *cifs_sb;
873 struct cifsTconInfo *pTcon;
874 int xid, long_op;
875 struct cifsFileInfo *open_file;
876
877 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
878
879 pTcon = cifs_sb->tcon;
880
881 /* cFYI(1,
882 (" write %d bytes to offset %lld of %s", write_size,
883 *poffset, file->f_path.dentry->d_name.name)); */
884
885 if (file->private_data == NULL)
886 return -EBADF;
887 open_file = (struct cifsFileInfo *) file->private_data;
888
889 rc = generic_write_checks(file, poffset, &write_size, 0);
890 if (rc)
891 return rc;
892
893 xid = GetXid();
894
895 if (*poffset > file->f_path.dentry->d_inode->i_size)
896 long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
897 else
898 long_op = CIFS_LONG_OP;
899
900 for (total_written = 0; write_size > total_written;
901 total_written += bytes_written) {
902 rc = -EAGAIN;
903 while (rc == -EAGAIN) {
904 if (file->private_data == NULL) {
905 /* file has been closed on us */
906 FreeXid(xid);
907 /* if we have gotten here we have written some data
908 and blocked, and the file has been freed on us while
909 we blocked so return what we managed to write */
910 return total_written;
911 }
912 if (open_file->closePend) {
913 FreeXid(xid);
914 if (total_written)
915 return total_written;
916 else
917 return -EBADF;
918 }
919 if (open_file->invalidHandle) {
920 /* we could deadlock if we called
921 filemap_fdatawait from here so tell
922 reopen_file not to flush data to server
923 now */
924 rc = cifs_reopen_file(file, false);
925 if (rc != 0)
926 break;
927 }
928
929 rc = CIFSSMBWrite(xid, pTcon,
930 open_file->netfid,
931 min_t(const int, cifs_sb->wsize,
932 write_size - total_written),
933 *poffset, &bytes_written,
934 NULL, write_data + total_written, long_op);
935 }
936 if (rc || (bytes_written == 0)) {
937 if (total_written)
938 break;
939 else {
940 FreeXid(xid);
941 return rc;
942 }
943 } else
944 *poffset += bytes_written;
945 long_op = CIFS_STD_OP; /* subsequent writes fast -
946 15 seconds is plenty */
947 }
948
949 cifs_stats_bytes_written(pTcon, total_written);
950
951 /* since the write may have blocked check these pointers again */
952 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
953 struct inode *inode = file->f_path.dentry->d_inode;
954/* Do not update local mtime - server will set its actual value on write
955 * inode->i_ctime = inode->i_mtime =
956 * current_fs_time(inode->i_sb);*/
957 if (total_written > 0) {
958 spin_lock(&inode->i_lock);
959 if (*poffset > file->f_path.dentry->d_inode->i_size)
960 i_size_write(file->f_path.dentry->d_inode,
961 *poffset);
962 spin_unlock(&inode->i_lock);
963 }
964 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
965 }
966 FreeXid(xid);
967 return total_written;
968}
969
970static ssize_t cifs_write(struct file *file, const char *write_data,
971 size_t write_size, loff_t *poffset)
972{
973 int rc = 0;
974 unsigned int bytes_written = 0;
975 unsigned int total_written;
976 struct cifs_sb_info *cifs_sb;
977 struct cifsTconInfo *pTcon;
978 int xid, long_op;
979 struct cifsFileInfo *open_file;
980
981 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
982
983 pTcon = cifs_sb->tcon;
984
985 cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
986 *poffset, file->f_path.dentry->d_name.name));
987
988 if (file->private_data == NULL)
989 return -EBADF;
990 open_file = (struct cifsFileInfo *)file->private_data;
991
992 xid = GetXid();
993
994 if (*poffset > file->f_path.dentry->d_inode->i_size)
995 long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
996 else
997 long_op = CIFS_LONG_OP;
998
999 for (total_written = 0; write_size > total_written;
1000 total_written += bytes_written) {
1001 rc = -EAGAIN;
1002 while (rc == -EAGAIN) {
1003 if (file->private_data == NULL) {
1004 /* file has been closed on us */
1005 FreeXid(xid);
1006 /* if we have gotten here we have written some data
1007 and blocked, and the file has been freed on us
1008 while we blocked so return what we managed to
1009 write */
1010 return total_written;
1011 }
1012 if (open_file->closePend) {
1013 FreeXid(xid);
1014 if (total_written)
1015 return total_written;
1016 else
1017 return -EBADF;
1018 }
1019 if (open_file->invalidHandle) {
1020 /* we could deadlock if we called
1021 filemap_fdatawait from here so tell
1022 reopen_file not to flush data to
1023 server now */
1024 rc = cifs_reopen_file(file, false);
1025 if (rc != 0)
1026 break;
1027 }
1028 if (experimEnabled || (pTcon->ses->server &&
1029 ((pTcon->ses->server->secMode &
1030 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1031 == 0))) {
1032 struct kvec iov[2];
1033 unsigned int len;
1034
1035 len = min((size_t)cifs_sb->wsize,
1036 write_size - total_written);
1037 /* iov[0] is reserved for smb header */
1038 iov[1].iov_base = (char *)write_data +
1039 total_written;
1040 iov[1].iov_len = len;
1041 rc = CIFSSMBWrite2(xid, pTcon,
1042 open_file->netfid, len,
1043 *poffset, &bytes_written,
1044 iov, 1, long_op);
1045 } else
1046 rc = CIFSSMBWrite(xid, pTcon,
1047 open_file->netfid,
1048 min_t(const int, cifs_sb->wsize,
1049 write_size - total_written),
1050 *poffset, &bytes_written,
1051 write_data + total_written,
1052 NULL, long_op);
1053 }
1054 if (rc || (bytes_written == 0)) {
1055 if (total_written)
1056 break;
1057 else {
1058 FreeXid(xid);
1059 return rc;
1060 }
1061 } else
1062 *poffset += bytes_written;
1063 long_op = CIFS_STD_OP; /* subsequent writes fast -
1064 15 seconds is plenty */
1065 }
1066
1067 cifs_stats_bytes_written(pTcon, total_written);
1068
1069 /* since the write may have blocked check these pointers again */
1070 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1071/*BB We could make this contingent on superblock ATIME flag too */
1072/* file->f_path.dentry->d_inode->i_ctime =
1073 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1074 if (total_written > 0) {
1075 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1076 if (*poffset > file->f_path.dentry->d_inode->i_size)
1077 i_size_write(file->f_path.dentry->d_inode,
1078 *poffset);
1079 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1da177e4 1080 }
3677db10 1081 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1082 }
1083 FreeXid(xid);
1084 return total_written;
1085}
1086
1087#ifdef CONFIG_CIFS_EXPERIMENTAL
1088struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
1089{
1090 struct cifsFileInfo *open_file = NULL;
1091
1092 read_lock(&GlobalSMBSeslock);
1093 /* we could simply get the first_list_entry since write-only entries
1094 are always at the end of the list but since the first entry might
1095 have a close pending, we go through the whole list */
1096 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1097 if (open_file->closePend)
1098 continue;
1099 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1100 (open_file->pfile->f_flags & O_RDONLY))) {
1101 if (!open_file->invalidHandle) {
1102 /* found a good file */
1103 /* lock it so it will not be closed on us */
1104 atomic_inc(&open_file->wrtPending);
1105 read_unlock(&GlobalSMBSeslock);
1106 return open_file;
1107 } /* else might as well continue, and look for
1108 another, or simply have the caller reopen it
1109 again rather than trying to fix this handle */
1110 } else /* write only file */
1111 break; /* write only files are last so must be done */
1112 }
1113 read_unlock(&GlobalSMBSeslock);
1114 return NULL;
1115}
1116#endif
1117
1118struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1119{
1120 struct cifsFileInfo *open_file;
1121 bool any_available = false;
1122 int rc;
1123
1124 /* Having a null inode here (because mapping->host was set to zero by
1125 the VFS or MM) should not happen but we had reports of on oops (due to
1126 it being zero) during stress testcases so we need to check for it */
1127
1128 if (cifs_inode == NULL) {
1129 cERROR(1, ("Null inode passed to cifs_writeable_file"));
1130 dump_stack();
1131 return NULL;
1132 }
1133
1134 read_lock(&GlobalSMBSeslock);
1135refind_writable:
1136 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1137 if (open_file->closePend ||
1138 (!any_available && open_file->pid != current->tgid))
1139 continue;
1140
1141 if (open_file->pfile &&
1142 ((open_file->pfile->f_flags & O_RDWR) ||
1143 (open_file->pfile->f_flags & O_WRONLY))) {
1144 atomic_inc(&open_file->wrtPending);
1145
1146 if (!open_file->invalidHandle) {
1147 /* found a good writable file */
1148 read_unlock(&GlobalSMBSeslock);
1149 return open_file;
1150 }
1151
1152 read_unlock(&GlobalSMBSeslock);
1153 /* Had to unlock since following call can block */
1154 rc = cifs_reopen_file(open_file->pfile, false);
1155 if (!rc) {
1156 if (!open_file->closePend)
1157 return open_file;
1158 else { /* start over in case this was deleted */
1159 /* since the list could be modified */
1160 read_lock(&GlobalSMBSeslock);
1161 atomic_dec(&open_file->wrtPending);
1162 goto refind_writable;
1163 }
1164 }
1165
1166 /* if it fails, try another handle if possible -
1167 (we can not do this if closePending since
1168 loop could be modified - in which case we
1169 have to start at the beginning of the list
1170 again. Note that it would be bad
1171 to hold up writepages here (rather than
1172 in caller) with continuous retries */
1173 cFYI(1, ("wp failed on reopen file"));
1174 read_lock(&GlobalSMBSeslock);
1175 /* can not use this handle, no write
1176 pending on this one after all */
1177 atomic_dec(&open_file->wrtPending);
1178
1179 if (open_file->closePend) /* list could have changed */
1180 goto refind_writable;
1181 /* else we simply continue to the next entry. Thus
1182 we do not loop on reopen errors. If we
1183 can not reopen the file, for example if we
1184 reconnected to a server with another client
1185 racing to delete or lock the file we would not
1186 make progress if we restarted before the beginning
1187 of the loop here. */
1188 }
1189 }
1190 /* couldn't find useable FH with same pid, try any available */
1191 if (!any_available) {
1192 any_available = true;
1193 goto refind_writable;
1194 }
1195 read_unlock(&GlobalSMBSeslock);
1196 return NULL;
1197}
1198
1199static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1200{
1201 struct address_space *mapping = page->mapping;
1202 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1203 char *write_data;
1204 int rc = -EFAULT;
1205 int bytes_written = 0;
1206 struct cifs_sb_info *cifs_sb;
1207 struct cifsTconInfo *pTcon;
1208 struct inode *inode;
1209 struct cifsFileInfo *open_file;
1210
1211 if (!mapping || !mapping->host)
1212 return -EFAULT;
1213
1214 inode = page->mapping->host;
1215 cifs_sb = CIFS_SB(inode->i_sb);
1216 pTcon = cifs_sb->tcon;
1217
1218 offset += (loff_t)from;
1219 write_data = kmap(page);
1220 write_data += from;
1221
1222 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1223 kunmap(page);
1224 return -EIO;
1225 }
1226
1227 /* racing with truncate? */
1228 if (offset > mapping->host->i_size) {
1229 kunmap(page);
1230 return 0; /* don't care */
1231 }
1232
1233 /* check to make sure that we are not extending the file */
1234 if (mapping->host->i_size - offset < (loff_t)to)
1235 to = (unsigned)(mapping->host->i_size - offset);
1236
1237 open_file = find_writable_file(CIFS_I(mapping->host));
1238 if (open_file) {
1239 bytes_written = cifs_write(open_file->pfile, write_data,
1240 to-from, &offset);
1241 atomic_dec(&open_file->wrtPending);
1242 /* Does mm or vfs already set times? */
1243 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1244 if ((bytes_written > 0) && (offset))
1245 rc = 0;
1246 else if (bytes_written < 0)
1247 rc = bytes_written;
1248 } else {
1249 cFYI(1, ("No writeable filehandles for inode"));
1250 rc = -EIO;
1251 }
1252
1253 kunmap(page);
1254 return rc;
1255}
1256
1257static int cifs_writepages(struct address_space *mapping,
1258 struct writeback_control *wbc)
1259 {
1260 struct backing_dev_info *bdi = mapping->backing_dev_info;
1261 unsigned int bytes_to_write;
1262 unsigned int bytes_written;
1263 struct cifs_sb_info *cifs_sb;
1264 int done = 0;
1265 pgoff_t end;
1266 pgoff_t index;
1267 int range_whole = 0;
1268 struct kvec *iov;
1269 int len;
1270 int n_iov = 0;
1271 pgoff_t next;
1272 int nr_pages;
1273 __u64 offset = 0;
1274 struct cifsFileInfo *open_file;
1275 struct page *page;
1276 struct pagevec pvec;
1277 int rc = 0;
1278 int scanned = 0;
1279 int xid;
1280
1281 cifs_sb = CIFS_SB(mapping->host->i_sb);
1282
1283 /*
1284 * If wsize is smaller that the page cache size, default to writing
1285 * one page at a time via cifs_writepage
1286 */
1287 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1288 return generic_writepages(mapping, wbc);
1289
1290 if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1291 if (cifs_sb->tcon->ses->server->secMode &
1292 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1293 if (!experimEnabled)
1294 return generic_writepages(mapping, wbc);
1295
1296 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1297 if (iov == NULL)
1298 return generic_writepages(mapping, wbc);
1299
1300
37c0eb46
SF
1301 /*
1302 * BB: Is this meaningful for a non-block-device file system?
1303 * If it is, we should test it again after we do I/O
1304 */
1305 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1306 wbc->encountered_congestion = 1;
1307 kfree(iov);
1308 return 0;
1309 }
1310
1311 xid = GetXid();
1312
1313 pagevec_init(&pvec, 0);
1314 if (wbc->range_cyclic) {
1315 index = mapping->writeback_index; /* Start from prev offset */
1316 end = -1;
1317 } else {
1318 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1319 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1320 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1321 range_whole = 1;
37c0eb46
SF
1322 scanned = 1;
1323 }
1324retry:
1325 while (!done && (index <= end) &&
1326 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1327 PAGECACHE_TAG_DIRTY,
1328 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1329 int first;
1330 unsigned int i;
1331
1332 first = -1;
1333 next = 0;
1334 n_iov = 0;
1335 bytes_to_write = 0;
1336
1337 for (i = 0; i < nr_pages; i++) {
1338 page = pvec.pages[i];
1339 /*
1340 * At this point we hold neither mapping->tree_lock nor
1341 * lock on the page itself: the page may be truncated or
1342 * invalidated (changing page->mapping to NULL), or even
1343 * swizzled back from swapper_space to tmpfs file
1344 * mapping
1345 */
1346
1347 if (first < 0)
1348 lock_page(page);
1349 else if (!trylock_page(page))
1350 break;
1351
1352 if (unlikely(page->mapping != mapping)) {
1353 unlock_page(page);
1354 break;
1355 }
1356
1357 if (!wbc->range_cyclic && page->index > end) {
1358 done = 1;
1359 unlock_page(page);
1360 break;
1361 }
1362
1363 if (next && (page->index != next)) {
1364 /* Not next consecutive page */
1365 unlock_page(page);
1366 break;
1367 }
1368
1369 if (wbc->sync_mode != WB_SYNC_NONE)
1370 wait_on_page_writeback(page);
1371
1372 if (PageWriteback(page) ||
1373 !clear_page_dirty_for_io(page)) {
1374 unlock_page(page);
1375 break;
1376 }
1377
1378 /*
1379 * This actually clears the dirty bit in the radix tree.
1380 * See cifs_writepage() for more commentary.
1381 */
1382 set_page_writeback(page);
1383
1384 if (page_offset(page) >= mapping->host->i_size) {
1385 done = 1;
1386 unlock_page(page);
1387 end_page_writeback(page);
1388 break;
1389 }
1390
1391 /*
1392 * BB can we get rid of this? pages are held by pvec
1393 */
1394 page_cache_get(page);
1395
1396 len = min(mapping->host->i_size - page_offset(page),
1397 (loff_t)PAGE_CACHE_SIZE);
1398
1399 /* reserve iov[0] for the smb header */
1400 n_iov++;
1401 iov[n_iov].iov_base = kmap(page);
1402 iov[n_iov].iov_len = len;
1403 bytes_to_write += len;
1404
1405 if (first < 0) {
1406 first = i;
1407 offset = page_offset(page);
1408 }
1409 next = page->index + 1;
1410 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1411 break;
1412 }
1413 if (n_iov) {
1414 /* Search for a writable handle every time we call
1415 * CIFSSMBWrite2. We can't rely on the last handle
1416 * we used to still be valid
1417 */
1418 open_file = find_writable_file(CIFS_I(mapping->host));
1419 if (!open_file) {
1420 cERROR(1, ("No writable handles for inode"));
1421 rc = -EBADF;
1422 } else {
1423 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1424 open_file->netfid,
1425 bytes_to_write, offset,
1426 &bytes_written, iov, n_iov,
1427 CIFS_LONG_OP);
1428 atomic_dec(&open_file->wrtPending);
1429 if (rc || bytes_written < bytes_to_write) {
1430 cERROR(1, ("Write2 ret %d, wrote %d",
1431 rc, bytes_written));
1432 /* BB what if continued retry is
1433 requested via mount flags? */
1434 if (rc == -ENOSPC)
1435 set_bit(AS_ENOSPC, &mapping->flags);
1436 else
1437 set_bit(AS_EIO, &mapping->flags);
1438 } else {
1439 cifs_stats_bytes_written(cifs_sb->tcon,
1440 bytes_written);
1441 }
1442 }
1443 for (i = 0; i < n_iov; i++) {
1444 page = pvec.pages[first + i];
1445 /* Should we also set page error on
1446 success rc but too little data written? */
1447 /* BB investigate retry logic on temporary
1448 server crash cases and how recovery works
1449 when page marked as error */
1450 if (rc)
1451 SetPageError(page);
1452 kunmap(page);
1453 unlock_page(page);
1454 end_page_writeback(page);
1455 page_cache_release(page);
1456 }
1457 if ((wbc->nr_to_write -= n_iov) <= 0)
1458 done = 1;
1459 index = next;
1460 } else
1461 /* Need to re-find the pages we skipped */
1462 index = pvec.pages[0]->index + 1;
1463
1464 pagevec_release(&pvec);
1465 }
1466 if (!scanned && !done) {
1467 /*
1468 * We hit the last page and there is more work to be done: wrap
1469 * back to the start of the file
1470 */
1471 scanned = 1;
1472 index = 0;
1473 goto retry;
1474 }
1475 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1476 mapping->writeback_index = index;
1477
1478 FreeXid(xid);
1479 kfree(iov);
1480 return rc;
1481}
1482
1483static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1484{
1485 int rc = -EFAULT;
1486 int xid;
1487
1488 xid = GetXid();
1489/* BB add check for wbc flags */
1490 page_cache_get(page);
1491 if (!PageUptodate(page))
1492 cFYI(1, ("ppw - page not up to date"));
1493
1494 /*
1495 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1496 *
1497 * A writepage() implementation always needs to do either this,
1498 * or re-dirty the page with "redirty_page_for_writepage()" in
1499 * the case of a failure.
1500 *
1501 * Just unlocking the page will cause the radix tree tag-bits
1502 * to fail to update with the state of the page correctly.
1503 */
1504 set_page_writeback(page);
1505 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1506 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1507 unlock_page(page);
1508 end_page_writeback(page);
1509 page_cache_release(page);
1510 FreeXid(xid);
1511 return rc;
1512}
1513
1514static int cifs_write_end(struct file *file, struct address_space *mapping,
1515 loff_t pos, unsigned len, unsigned copied,
1516 struct page *page, void *fsdata)
1da177e4 1517{
d9414774
NP
1518 int rc;
1519 struct inode *inode = mapping->host;
1520
1521 cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
1522 page, pos, copied));
1523
1524 if (PageChecked(page)) {
1525 if (copied == len)
1526 SetPageUptodate(page);
1527 ClearPageChecked(page);
1528 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1529 SetPageUptodate(page);
1530
1531 if (!PageUptodate(page)) {
1532 char *page_data;
1533 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1534 int xid;
1535
1536 xid = GetXid();
1537 /* this is probably better than directly calling
1538 partialpage_write since in this function the file handle is
1539 known which we might as well leverage */
1540 /* BB check if anything else missing out of ppw
1541 such as updating last write time */
1542 page_data = kmap(page);
1543 rc = cifs_write(file, page_data + offset, copied, &pos);
1544 /* if (rc < 0) should we set writebehind rc? */
1545 kunmap(page);
1546
1547 FreeXid(xid);
1548 } else {
1549 rc = copied;
1550 pos += copied;
1551 set_page_dirty(page);
1552 }
1553
1554 if (rc > 0) {
1555 spin_lock(&inode->i_lock);
1556 if (pos > inode->i_size)
1557 i_size_write(inode, pos);
1558 spin_unlock(&inode->i_lock);
1559 }
1560
1561 unlock_page(page);
1562 page_cache_release(page);
1563
1564 return rc;
1565}
1566
1567int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1568{
1569 int xid;
1570 int rc = 0;
1571 struct cifsTconInfo *tcon;
1572 struct cifsFileInfo *smbfile =
1573 (struct cifsFileInfo *)file->private_data;
1574 struct inode *inode = file->f_path.dentry->d_inode;
1575
1576 xid = GetXid();
1577
1578 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1579 dentry->d_name.name, datasync));
1580
1581 rc = filemap_write_and_wait(inode->i_mapping);
1582 if (rc == 0) {
1583 rc = CIFS_I(inode)->write_behind_rc;
1584 CIFS_I(inode)->write_behind_rc = 0;
1585 tcon = CIFS_SB(inode->i_sb)->tcon;
1586 if (!rc && tcon && smbfile &&
1587 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1588 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1589 }
1590
1591 FreeXid(xid);
1592 return rc;
1593}
1594
1595/* static void cifs_sync_page(struct page *page)
1596{
1597 struct address_space *mapping;
1598 struct inode *inode;
1599 unsigned long index = page->index;
1600 unsigned int rpages = 0;
1601 int rc = 0;
1602
1603 cFYI(1, ("sync page %p",page));
1604 mapping = page->mapping;
1605 if (!mapping)
1606 return 0;
1607 inode = mapping->host;
1608 if (!inode)
1609 return; */
1610
1611/* fill in rpages then
1612 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1613
1614/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1615
1616#if 0
1617 if (rc < 0)
1618 return rc;
1619 return 0;
1620#endif
1621} */
1622
1623/*
1624 * As file closes, flush all cached write data for this inode checking
1625 * for write behind errors.
1626 */
1627int cifs_flush(struct file *file, fl_owner_t id)
1628 {
1629 struct inode *inode = file->f_path.dentry->d_inode;
1630 int rc = 0;
1631
1632 /* Rather than do the steps manually:
1633 lock the inode for writing
1634 loop through pages looking for write behind data (dirty pages)
1635 coalesce into contiguous 16K (or smaller) chunks to write to server
1636 send to server (prefer in parallel)
1637 deal with writebehind errors
1638 unlock inode for writing
1639 filemapfdatawrite appears easier for the time being */
1640
1641 rc = filemap_fdatawrite(inode->i_mapping);
1642 /* reset wb rc if we were able to write out dirty pages */
1643 if (!rc) {
1644 rc = CIFS_I(inode)->write_behind_rc;
1645 CIFS_I(inode)->write_behind_rc = 0;
1646 }
1647
1648 cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1649
1650 return rc;
1651}
1652
1653ssize_t cifs_user_read(struct file *file, char __user *read_data,
1654 size_t read_size, loff_t *poffset)
1655{
1656 int rc = -EACCES;
1657 unsigned int bytes_read = 0;
1658 unsigned int total_read = 0;
1659 unsigned int current_read_size;
1660 struct cifs_sb_info *cifs_sb;
1661 struct cifsTconInfo *pTcon;
1662 int xid;
1663 struct cifsFileInfo *open_file;
1664 char *smb_read_data;
1665 char __user *current_offset;
1666 struct smb_com_read_rsp *pSMBr;
1667
1668 xid = GetXid();
1669 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1670 pTcon = cifs_sb->tcon;
1671
1672 if (file->private_data == NULL) {
1673 FreeXid(xid);
1674 return -EBADF;
1675 }
1676 open_file = (struct cifsFileInfo *)file->private_data;
1677
1678 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1679 cFYI(1, ("attempting read on write only file instance"));
1680
1681 for (total_read = 0, current_offset = read_data;
1682 read_size > total_read;
1683 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1684 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1685 cifs_sb->rsize);
1686 rc = -EAGAIN;
1687 smb_read_data = NULL;
1688 while (rc == -EAGAIN) {
ec637e3f 1689 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1690 if ((open_file->invalidHandle) &&
1da177e4 1691 (!open_file->closePend)) {
4b18f2a9 1692 rc = cifs_reopen_file(file, true);
1da177e4
LT
1693 if (rc != 0)
1694 break;
1695 }
bfa0d75a 1696 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1697 open_file->netfid,
1698 current_read_size, *poffset,
1699 &bytes_read, &smb_read_data,
1700 &buf_type);
1da177e4 1701 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1702 if (smb_read_data) {
93544cc6
SF
1703 if (copy_to_user(current_offset,
1704 smb_read_data +
1705 4 /* RFC1001 length field */ +
1706 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1707 bytes_read))
93544cc6 1708 rc = -EFAULT;
93544cc6 1709
fb8c4b14 1710 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1711 cifs_small_buf_release(smb_read_data);
fb8c4b14 1712 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1713 cifs_buf_release(smb_read_data);
1da177e4
LT
1714 smb_read_data = NULL;
1715 }
1716 }
1717 if (rc || (bytes_read == 0)) {
1718 if (total_read) {
1719 break;
1720 } else {
1721 FreeXid(xid);
1722 return rc;
1723 }
1724 } else {
a4544347 1725 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1726 *poffset += bytes_read;
1727 }
1728 }
1729 FreeXid(xid);
1730 return total_read;
1731}
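
/*
 * Illustrative sketch (not part of the original file): where cifs_user_read
 * finds the data payload inside an SMB READ response.  The buffer begins
 * with the 4 byte RFC1001 length field, and the server-supplied DataOffset
 * is measured from the SMB header that follows it.  The helper name is
 * hypothetical.
 */
static inline char *cifs_sketch_read_payload(char *smb_read_data,
					     struct smb_com_read_rsp *pSMBr)
{
	return smb_read_data + 4 /* RFC1001 length field */ +
	       le16_to_cpu(pSMBr->DataOffset);
}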


static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For windows me and 9x we do not want to request more
		   than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
		    !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
						  pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
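
/*
 * Illustrative sketch (not part of the original file): the clamp cifs_read
 * applies above for servers that did not negotiate CAP_LARGE_FILES (e.g.
 * Windows 9x/ME), which refuse reads larger than the negotiated buffer.
 * The helper name is hypothetical.
 */
static inline unsigned int cifs_sketch_clamp_read_size(unsigned int want,
						       struct cifsSesInfo *ses)
{
	if (ses && !(ses->capabilities & CAP_LARGE_FILES))
		/* leave headroom for the response headers */
		want = min_t(unsigned int, want, ses->server->maxBuf - 128);
	return want;
}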

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dentry *dentry = file->f_path.dentry;
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate(dentry);
	if (rc) {
		cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}


static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data,
	struct pagevec *plru_pvec)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache(page, mapping, page->index,
				      GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, ("Add page cache failed"));
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (!pagevec_add(plru_pvec, page))
			__pagevec_lru_add_file(plru_pvec);
		data += PAGE_CACHE_SIZE;
	}
	return;
}

static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct pagevec lru_pvec;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	pagevec_init(&lru_pvec, 0);
	cFYI(DBG2, ("rpages: num pages %d", num_pages));
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d",
			    read_size, contig_pages));
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, true);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, ("Read error in readpages: %d", rc));
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, ("No bytes read (%d) at offset %lld . "
				 "Cleaning remaining pages from readahead list",
				 bytes_read, offset));
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	pagevec_lru_add_file(&lru_pvec);

/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

	FreeXid(xid);
	return rc;
}
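
/*
 * Illustrative sketch (not part of the original file): how cifs_readpages
 * sizes each wire read above.  The request covers the run of contiguous
 * pages but is capped at the largest whole number of pages that fits in
 * the negotiated rsize, so the reply maps cleanly onto page cache pages.
 * The helper name is hypothetical.
 */
static inline unsigned int cifs_sketch_readpages_size(unsigned int contig_pages,
						      unsigned int rsize)
{
	unsigned int read_size = contig_pages * PAGE_CACHE_SIZE;

	return min_t(unsigned int, read_size, rsize & PAGE_CACHE_MASK);
}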

static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, ("Bytes read %d", rc));

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);
	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	cFYI(1, ("readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset));

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	read_lock(&GlobalSMBSeslock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			read_unlock(&GlobalSMBSeslock);
			return 1;
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
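
/*
 * Illustrative sketch (not part of the original file): callers that
 * revalidate inode metadata are expected to consult is_size_safe_to_change()
 * before trusting a server-reported end of file.  The surrounding names
 * (update_inode_size, server_eof) are hypothetical.
 */
static inline void update_inode_size(struct inode *inode, __u64 server_eof)
{
	if (is_size_safe_to_change(CIFS_I(inode), server_eof))
		i_size_write(inode, (loff_t)server_eof);
}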

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
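
/*
 * Illustrative sketch (not part of the original file): the condition
 * cifs_write_begin uses above to skip the read-before-write when a read
 * oplock is held - either the page lies entirely beyond the current EOF,
 * or the write starts at the page boundary and reaches at least to EOF.
 * The helper name is hypothetical.
 */
static inline bool cifs_sketch_skip_prefill(loff_t page_start, loff_t pos,
					    unsigned int len, loff_t offset,
					    loff_t i_size)
{
	return page_start >= i_size ||
	       (offset == 0 && (pos + len) >= i_size);
}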

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};
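
/*
 * Illustrative sketch (not part of the original file): how inode setup code
 * might choose between the two operation tables above.  The threshold (one
 * page of data plus the largest possible CIFS header, assumed here to be
 * MAX_CIFS_HDR_SIZE) and the exact location of such a check are assumptions;
 * the helper name is hypothetical.
 */
static inline const struct address_space_operations *
cifs_sketch_pick_aops(struct TCP_Server_Info *server)
{
	if (server->maxBuf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		return &cifs_addr_ops_smallbuf;
	return &cifs_addr_ops;
}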