fs/cifs: reopen persistent handles on reconnect
[linux-2.6-block.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
07b92d0d 46
1da177e4
LT
47static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
e10f7b55
JL
60 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
7fc8f4e9 63}
e10f7b55 64
608712fe 65static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 66{
608712fe 67 u32 posix_flags = 0;
e10f7b55 68
7fc8f4e9 69 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 70 posix_flags = SMB_O_RDONLY;
7fc8f4e9 71 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
72 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
07b92d0d 76 if (flags & O_CREAT) {
608712fe 77 posix_flags |= SMB_O_CREAT;
07b92d0d
SF
78 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
f96637be
JP
81 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
07b92d0d 83
608712fe
JL
84 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 87 if (flags & O_DSYNC)
608712fe 88 posix_flags |= SMB_O_SYNC;
7fc8f4e9 89 if (flags & O_DIRECTORY)
608712fe 90 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 91 if (flags & O_NOFOLLOW)
608712fe 92 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 93 if (flags & O_DIRECT)
608712fe 94 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
95
96 return posix_flags;
1da177e4
LT
97}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
55aa2e09
SF
107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
1da177e4
LT
109 else
110 return FILE_OPEN;
111}
112
608712fe
JL
/*
 * Open a file via the CIFS Unix-extensions POSIX create call.
 *
 * @full_path: server-relative path of the file
 * @pinode:    in/out inode pointer; NULL if caller needs no inode info,
 *             *pinode == NULL to allocate a fresh inode from the reply
 * @sb:        superblock of the mount
 * @mode:      create mode (masked with current umask below)
 * @f_flags:   open(2) flags, converted to SMB_O_* for the wire
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: netfid of the opened handle
 * @xid:       transaction id for debugging/tracing
 *
 * Returns 0 on success or a negative errno.  On success with a reply of
 * Type == -1 the inode is left untouched and the caller must do a
 * qpathinfo itself.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* server did not return file info in the create response */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: refresh its attributes from the reply */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173
eeb910a6
PS
/*
 * Open a file using the regular (non-POSIX-extensions) path via
 * server->ops->open, then refresh the inode from the returned metadata.
 *
 * Returns 0 on success or a negative errno (-ENOSYS when the server ops
 * table has no open method).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* mounts with backupuid/backupgid request backup-intent opens */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh local inode metadata from the open reply / server */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
252
63b7d3a4
PS
253static bool
254cifs_has_mand_locks(struct cifsInodeInfo *cinode)
255{
256 struct cifs_fid_locks *cur;
257 bool has_locks = false;
258
259 down_read(&cinode->lock_sem);
260 list_for_each_entry(cur, &cinode->llist, llist) {
261 if (!list_empty(&cur->locks)) {
262 has_locks = true;
263 break;
264 }
265 }
266 up_read(&cinode->lock_sem);
267 return has_locks;
268}
269
/*
 * Allocate a cifsFileInfo for a freshly opened handle, attach its
 * byte-range lock list to the inode, resolve the effective oplock level
 * against any pending open, and link the file into the per-tcon and
 * per-inode open-file lists.  Returns the new cifsFileInfo (stored in
 * file->private_data) or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	/* publish this fid's (empty) lock list on the inode */
	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;		/* caller holds the initial reference */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* pin the superblock while this handle exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have updated the pending open's oplock level */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	/* cleared here; ->set_fid may set it to request a cache purge below */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	/* purge outside the spinlock since it can block */
	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
344
764a1b1a
JL
345struct cifsFileInfo *
346cifsFileInfo_get(struct cifsFileInfo *cifs_file)
347{
3afca265 348 spin_lock(&cifs_file->file_info_lock);
764a1b1a 349 cifsFileInfo_get_locked(cifs_file);
3afca265 350 spin_unlock(&cifs_file->file_info_lock);
764a1b1a
JL
351 return cifs_file;
352}
353
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		/* other references remain; nothing more to do */
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	/* true if an oplock break worker was queued but not yet run */
	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	/* if we cancelled a pending break, complete it on the server's behalf */
	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
443
/*
 * VFS ->open for regular files.  Tries a POSIX-extensions open first when
 * the server supports it, falling back to the regular NT-style open, then
 * creates the cifsFileInfo and wires it into file->private_data.
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* O_DIRECT on a strict-IO mount switches to the direct file ops */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX-extensions open when the server advertises it */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* track the open so a concurrent lease break is not lost */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open before bailing out */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
570
f152fd5f
PS
571static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
572
2ae78ba8
PS
573/*
574 * Try to reacquire byte range locks that were released when session
f152fd5f 575 * to server was lost.
2ae78ba8 576 */
f152fd5f
PS
577static int
578cifs_relock_file(struct cifsFileInfo *cfile)
1da177e4 579{
f152fd5f 580 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2b0143b5 581 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
f152fd5f 582 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1da177e4
LT
583 int rc = 0;
584
689c3db4 585 down_read(&cinode->lock_sem);
f152fd5f 586 if (cinode->can_cache_brlcks) {
689c3db4
PS
587 /* can cache locks - no need to relock */
588 up_read(&cinode->lock_sem);
f152fd5f
PS
589 return rc;
590 }
591
592 if (cap_unix(tcon->ses) &&
593 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
594 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
595 rc = cifs_push_posix_locks(cfile);
596 else
597 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1da177e4 598
689c3db4 599 up_read(&cinode->lock_sem);
1da177e4
LT
600 return rc;
601}
602
2ae78ba8
PS
/*
 * Reopen an invalidated file handle (e.g. after reconnect or durable
 * handle timeout).  When @can_flush is true, dirty pages are written
 * back and the inode refreshed from the server; callers already in the
 * writeback path pass false to avoid deadlocking on their own data.
 * Returns 0 on success or a negative errno.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* another task already reopened the handle */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
751
752int cifs_close(struct inode *inode, struct file *file)
753{
77970693
JL
754 if (file->private_data != NULL) {
755 cifsFileInfo_put(file->private_data);
756 file->private_data = NULL;
757 }
7ee1af76 758
cdff08e7
SF
759 /* return code from the ->release op is always ignored */
760 return 0;
1da177e4
LT
761}
762
52ace1ef
SF
/*
 * Walk every file open on @tcon after a reconnect and reopen its handle
 * without flushing (callers are in the reconnect path).
 *
 * NOTE(review): the open_file_lock is dropped around cifs_reopen_file(),
 * so the list can change while we iterate; list_for_each_safe() only
 * protects against *this* iteration deleting entries — confirm that
 * concurrent removal of tmp1 cannot occur here, or snapshot the list
 * under the lock first.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		/* drop the lock: reopen issues network I/O and can sleep */
		spin_unlock(&tcon->open_file_lock);
		cifs_reopen_file(open_file, false /* do not flush */);
		spin_lock(&tcon->open_file_lock);
	}
	spin_unlock(&tcon->open_file_lock);
}
780
1da177e4
LT
781int cifs_closedir(struct inode *inode, struct file *file)
782{
783 int rc = 0;
6d5786a3 784 unsigned int xid;
4b4de76e 785 struct cifsFileInfo *cfile = file->private_data;
92fc65a7
PS
786 struct cifs_tcon *tcon;
787 struct TCP_Server_Info *server;
788 char *buf;
1da177e4 789
f96637be 790 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1da177e4 791
92fc65a7
PS
792 if (cfile == NULL)
793 return rc;
794
6d5786a3 795 xid = get_xid();
92fc65a7
PS
796 tcon = tlink_tcon(cfile->tlink);
797 server = tcon->ses->server;
1da177e4 798
f96637be 799 cifs_dbg(FYI, "Freeing private data in close dir\n");
3afca265 800 spin_lock(&cfile->file_info_lock);
52755808 801 if (server->ops->dir_needs_close(cfile)) {
92fc65a7 802 cfile->invalidHandle = true;
3afca265 803 spin_unlock(&cfile->file_info_lock);
92fc65a7
PS
804 if (server->ops->close_dir)
805 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
806 else
807 rc = -ENOSYS;
f96637be 808 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
92fc65a7
PS
809 /* not much we can do if it fails anyway, ignore rc */
810 rc = 0;
811 } else
3afca265 812 spin_unlock(&cfile->file_info_lock);
92fc65a7
PS
813
814 buf = cfile->srch_inf.ntwrk_buf_start;
815 if (buf) {
f96637be 816 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
92fc65a7
PS
817 cfile->srch_inf.ntwrk_buf_start = NULL;
818 if (cfile->srch_inf.smallBuf)
819 cifs_small_buf_release(buf);
820 else
821 cifs_buf_release(buf);
1da177e4 822 }
92fc65a7
PS
823
824 cifs_put_tlink(cfile->tlink);
825 kfree(file->private_data);
826 file->private_data = NULL;
1da177e4 827 /* BB can we lock the filestruct while this is going on? */
6d5786a3 828 free_xid(xid);
1da177e4
LT
829 return rc;
830}
831
85160e03 832static struct cifsLockInfo *
fbd35aca 833cifs_lock_init(__u64 offset, __u64 length, __u8 type)
7ee1af76 834{
a88b4707 835 struct cifsLockInfo *lock =
fb8c4b14 836 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
837 if (!lock)
838 return lock;
839 lock->offset = offset;
840 lock->length = length;
841 lock->type = type;
a88b4707
PS
842 lock->pid = current->tgid;
843 INIT_LIST_HEAD(&lock->blist);
844 init_waitqueue_head(&lock->block_q);
845 return lock;
85160e03
PS
846}
847
f7ba7fe6 848void
85160e03
PS
849cifs_del_lock_waiters(struct cifsLockInfo *lock)
850{
851 struct cifsLockInfo *li, *tmp;
852 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
853 list_del_init(&li->blist);
854 wake_up(&li->block_q);
855 }
856}
857
081c0414
PS
858#define CIFS_LOCK_OP 0
859#define CIFS_READ_OP 1
860#define CIFS_WRITE_OP 2
861
/* @rw_check : 0 - no op, 1 - read, 2 - write */
/*
 * Scan one fid's lock list for a lock overlapping [offset, offset+length).
 * A lock held by the same thread group through the same fid is not a
 * conflict, except that a shared lock still blocks a write (rw_check ==
 * CIFS_WRITE_OP) through that fid.  Same-type locks from the same owner
 * never conflict.  On conflict, *conf_lock (if non-NULL) is set to the
 * offending lock and true is returned.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks that do not overlap the requested range */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
893
579f9053 894bool
55157dfb 895cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
579f9053 896 __u8 type, struct cifsLockInfo **conf_lock,
081c0414 897 int rw_check)
161ebf9f 898{
fbd35aca 899 bool rc = false;
f45d3416 900 struct cifs_fid_locks *cur;
2b0143b5 901 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
fbd35aca 902
f45d3416
PS
903 list_for_each_entry(cur, &cinode->llist, llist) {
904 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
579f9053 905 cfile, conf_lock, rw_check);
fbd35aca
PS
906 if (rc)
907 break;
908 }
fbd35aca
PS
909
910 return rc;
161ebf9f
PS
911}
912
9a5101c8
PS
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock back through @flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* no local conflict, but the server must also be consulted */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
950
/*
 * Unconditionally append @lock to this fid's cached lock list,
 * taking lock_sem for write around the list insertion.
 */
161ebf9f 951static void
fbd35aca 952cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 953{
2b0143b5 954 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1b4b55a1 955 down_write(&cinode->lock_sem);
f45d3416 956 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 957 up_write(&cinode->lock_sem);
7ee1af76
JA
958}
959
9a5101c8
PS
960/*
961 * Set the byte-range lock (mandatory style). Returns:
962 * 1) 0, if we set the lock and don't need to request to the server;
963 * 2) 1, if no locks prevent us but we need to request to the server;
964 * 3) -EACCES, if there is a lock that prevents us and wait is false.
965 */
85160e03 966static int
fbd35aca 967cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
161ebf9f 968 bool wait)
85160e03 969{
161ebf9f 970 struct cifsLockInfo *conf_lock;
2b0143b5 971 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
85160e03
PS
972 bool exist;
973 int rc = 0;
974
85160e03
PS
975try_again:
976 exist = false;
977 down_write(&cinode->lock_sem);
85160e03 978

Wait

55157dfb 979 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
081c0414 980 lock->type, &conf_lock, CIFS_LOCK_OP);
85160e03 981 if (!exist && cinode->can_cache_brlcks) {
f45d3416 982 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 983 up_write(&cinode->lock_sem);
85160e03
PS
984 return rc;
985 }
986
987 if (!exist)
988 rc = 1;
989 else if (!wait)
990 rc = -EACCES;
991 else {
/* queue ourselves on the conflicting lock and sleep until it is freed */
992 list_add_tail(&lock->blist, &conf_lock->blist);
1b4b55a1 993 up_write(&cinode->lock_sem);
85160e03
PS
994 rc = wait_event_interruptible(lock->block_q,
995 (lock->blist.prev == &lock->blist) &&
996 (lock->blist.next == &lock->blist));
997 if (!rc)
998 goto try_again;
/* interrupted: unhook ourselves from the waiter list before exit */
1b4b55a1 999 down_write(&cinode->lock_sem);
a88b4707 1000 list_del_init(&lock->blist);
85160e03
PS
1001 }
1002
1b4b55a1 1003 up_write(&cinode->lock_sem);
85160e03
PS
1004 return rc;
1005}
1006
9a5101c8
PS
1007/*
1008 * Check if there is another lock that prevents us from setting the lock
1009 * (posix style). If such a lock exists, update the flock structure with
1010 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1011 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1012 * request to the server or 1 otherwise.
1013 */
85160e03 1014static int
4f6bcec9
PS
1015cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1016{
1017 int rc = 0;
496ad9aa 1018 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
4f6bcec9
PS
/* posix_test_lock() overwrites fl_type; remember it to restore below */
1019 unsigned char saved_type = flock->fl_type;
1020
50792760
PS
1021 if ((flock->fl_flags & FL_POSIX) == 0)
1022 return 1;
1023
1b4b55a1 1024 down_read(&cinode->lock_sem);
4f6bcec9
PS
1025 posix_test_lock(file, flock);
1026
1027 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1028 flock->fl_type = saved_type;
1029 rc = 1;
1030 }
1031
1b4b55a1 1032 up_read(&cinode->lock_sem);
4f6bcec9
PS
1033 return rc;
1034}
1035
9a5101c8
PS
1036/*
1037 * Set the byte-range lock (posix style). Returns:
1038 * 1) 0, if we set the lock and don't need to request to the server;
1039 * 2) 1, if we need to request to the server;
1040 * 3) <0, if the error occurs while setting the lock.
1041 */
4f6bcec9
PS
1042static int
1043cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1044{
496ad9aa 1045 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
50792760
PS
1046 int rc = 1;
1047
1048 if ((flock->fl_flags & FL_POSIX) == 0)
1049 return rc;
4f6bcec9 1050
66189be7 1051try_again:
1b4b55a1 1052 down_write(&cinode->lock_sem);
4f6bcec9 1053 if (!cinode->can_cache_brlcks) {
1b4b55a1 1054 up_write(&cinode->lock_sem);
50792760 1055 return rc;
4f6bcec9 1056 }
66189be7
PS
1057
/* cache the lock locally via the VFS posix lock machinery */
1058 rc = posix_lock_file(file, flock, NULL);
1b4b55a1 1059 up_write(&cinode->lock_sem);
66189be7
PS
1060 if (rc == FILE_LOCK_DEFERRED) {
/* blocked on another posix lock; wait and retry, or unblock on signal */
1061 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
1062 if (!rc)
1063 goto try_again;
1a9e64a7 1064 posix_unblock_lock(flock);
66189be7 1065 }
9ebb389d 1066 return rc;
4f6bcec9
PS
1067}
1068
/*
 * Push every cached mandatory byte-range lock of @cfile to the server,
 * batching up to max_num ranges per LOCKING_ANDX request.  Two passes:
 * exclusive locks first, then shared (see types[]).  Returns the last
 * non-zero server status, or 0 if every batch succeeded.
 */
d39a4f71 1069int
4f6bcec9 1070cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
85160e03 1071{
6d5786a3
PS
1072 unsigned int xid;
1073 int rc = 0, stored_rc;
85160e03
PS
1074 struct cifsLockInfo *li, *tmp;
1075 struct cifs_tcon *tcon;
0013fb4c 1076 unsigned int num, max_num, max_buf;
32b9aaf1
PS
1077 LOCKING_ANDX_RANGE *buf, *cur;
1078 int types[] = {LOCKING_ANDX_LARGE_FILES,
1079 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1080 int i;
85160e03 1081
6d5786a3 1082 xid = get_xid();
85160e03
PS
1083 tcon = tlink_tcon(cfile->tlink);
1084
0013fb4c
PS
1085 /*
1086 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1087 * and check it for zero before using.
1088 */
1089 max_buf = tcon->ses->server->maxBuf;
1090 if (!max_buf) {
6d5786a3 1091 free_xid(xid);
0013fb4c
PS
1092 return -EINVAL;
1093 }
1094
/* how many lock ranges fit in one SMB buffer after the header */
1095 max_num = (max_buf - sizeof(struct smb_hdr)) /
1096 sizeof(LOCKING_ANDX_RANGE);
4b99d39b 1097 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
32b9aaf1 1098 if (!buf) {
6d5786a3 1099 free_xid(xid);
e2f2886a 1100 return -ENOMEM;
32b9aaf1
PS
1101 }
1102
1103 for (i = 0; i < 2; i++) {
1104 cur = buf;
1105 num = 0;
f45d3416 1106 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
32b9aaf1
PS
1107 if (li->type != types[i])
1108 continue;
1109 cur->Pid = cpu_to_le16(li->pid);
1110 cur->LengthLow = cpu_to_le32((u32)li->length);
1111 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1112 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1113 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1114 if (++num == max_num) {
/* buffer full: flush this batch to the server */
4b4de76e
PS
1115 stored_rc = cifs_lockv(xid, tcon,
1116 cfile->fid.netfid,
04a6aa8a
PS
1117 (__u8)li->type, 0, num,
1118 buf);
32b9aaf1
PS
1119 if (stored_rc)
1120 rc = stored_rc;
1121 cur = buf;
1122 num = 0;
1123 } else
1124 cur++;
1125 }
1126
/* flush the final, partially-filled batch for this lock type */
1127 if (num) {
4b4de76e 1128 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
04a6aa8a 1129 (__u8)types[i], 0, num, buf);
32b9aaf1
PS
1130 if (stored_rc)
1131 rc = stored_rc;
1132 }
85160e03
PS
1133 }
1134
32b9aaf1 1135 kfree(buf);
6d5786a3 1136 free_xid(xid);
85160e03
PS
1137 return rc;
1138}
1139
3d22462a
JL
/*
 * Derive a stable 32-bit lock-owner id from an fl_owner_t pointer,
 * XORed with the boot-time cifs_lock_secret so the raw kernel pointer
 * value is not leaked onto the wire.
 */
1140static __u32
1141hash_lockowner(fl_owner_t owner)
1142{
1143 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1144}
1145
d5751469
PS
/*
 * Snapshot of one posix lock, copied out under flc_lock so the server
 * requests in cifs_push_posix_locks() can run without holding it.
 */
1146struct lock_to_push {
1147 struct list_head llist;
1148 __u64 offset;
1149 __u64 length;
1150 __u32 pid;
1151 __u16 netfid;
1152 __u8 type;
1153};
1154
/*
 * Replay all cached posix byte-range locks of this inode to the server.
 * The flc_posix list is counted and then copied into pre-allocated
 * lock_to_push entries under flc_lock; the actual CIFSSMBPosixLock
 * calls happen afterwards, without any spinlock held.
 */
4f6bcec9 1155static int
b8db928b 1156cifs_push_posix_locks(struct cifsFileInfo *cfile)
4f6bcec9 1157{
2b0143b5 1158 struct inode *inode = d_inode(cfile->dentry);
4f6bcec9 1159 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
bd61e0a9
JL
1160 struct file_lock *flock;
1161 struct file_lock_context *flctx = inode->i_flctx;
e084c1bd 1162 unsigned int count = 0, i;
4f6bcec9 1163 int rc = 0, xid, type;
d5751469
PS
1164 struct list_head locks_to_send, *el;
1165 struct lock_to_push *lck, *tmp;
4f6bcec9 1166 __u64 length;
4f6bcec9 1167
6d5786a3 1168 xid = get_xid();
4f6bcec9 1169
bd61e0a9
JL
1170 if (!flctx)
1171 goto out;
d5751469 1172
e084c1bd
JL
1173 spin_lock(&flctx->flc_lock);
1174 list_for_each(el, &flctx->flc_posix) {
1175 count++;
1176 }
1177 spin_unlock(&flctx->flc_lock);
1178
4f6bcec9
PS
1179 INIT_LIST_HEAD(&locks_to_send);
1180
d5751469 1181 /*
e084c1bd
JL
1182 * Allocating count locks is enough because no FL_POSIX locks can be
1183 * added to the list while we are holding cinode->lock_sem that
ce85852b 1184 * protects locking operations of this inode.
d5751469 1185 */
e084c1bd 1186 for (i = 0; i < count; i++) {
d5751469
PS
1187 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1188 if (!lck) {
1189 rc = -ENOMEM;
1190 goto err_out;
1191 }
1192 list_add_tail(&lck->llist, &locks_to_send);
1193 }
1194
d5751469 1195 el = locks_to_send.next;
6109c850 1196 spin_lock(&flctx->flc_lock);
bd61e0a9 1197 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
d5751469 1198 if (el == &locks_to_send) {
ce85852b
PS
1199 /*
1200 * The list ended. We don't have enough allocated
1201 * structures - something is really wrong.
1202 */
f96637be 1203 cifs_dbg(VFS, "Can't push all brlocks!\n");
d5751469
PS
1204 break;
1205 }
4f6bcec9
PS
1206 length = 1 + flock->fl_end - flock->fl_start;
1207 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1208 type = CIFS_RDLCK;
1209 else
1210 type = CIFS_WRLCK;
d5751469 1211 lck = list_entry(el, struct lock_to_push, llist);
/*
 * NOTE(review): `el` is never advanced inside this loop in the text
 * shown here; upstream has an `el = el->next;` at this point.  Verify
 * against the original file — this looks like extraction loss.
 */
3d22462a 1212 lck->pid = hash_lockowner(flock->fl_owner);
4b4de76e 1213 lck->netfid = cfile->fid.netfid;
d5751469
PS
1214 lck->length = length;
1215 lck->type = type;
1216 lck->offset = flock->fl_start;
4f6bcec9 1217 }
6109c850 1218 spin_unlock(&flctx->flc_lock);
4f6bcec9
PS
1219
/* now safe to talk to the server: no spinlock held */
1220 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
4f6bcec9
PS
1221 int stored_rc;
1222
4f6bcec9 1223 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
c5fd363d 1224 lck->offset, lck->length, NULL,
4f6bcec9
PS
1225 lck->type, 0);
1226 if (stored_rc)
1227 rc = stored_rc;
1228 list_del(&lck->llist);
1229 kfree(lck);
1230 }
1231
d5751469 1232out:
6d5786a3 1233 free_xid(xid);
4f6bcec9 1234 return rc;
d5751469
PS
1235err_out:
1236 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1237 list_del(&lck->llist);
1238 kfree(lck);
1239 }
1240 goto out;
4f6bcec9
PS
1241}
1242
/*
 * Flush all cached byte-range locks to the server (posix or mandatory,
 * depending on tcon capabilities/mount flags) and clear
 * can_cache_brlcks so subsequent locks go to the server directly.
 */
9ec3c882 1243static int
b8db928b 1244cifs_push_locks(struct cifsFileInfo *cfile)
9ec3c882 1245{
b8db928b 1246 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2b0143b5 1247 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
b8db928b 1248 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
9ec3c882
PS
1249 int rc = 0;
1250
1251 /* we are going to update can_cache_brlcks here - need a write access */
1252 down_write(&cinode->lock_sem);
1253 if (!cinode->can_cache_brlcks) {
1254 up_write(&cinode->lock_sem);
1255 return rc;
1256 }
4f6bcec9 1257
29e20f9c 1258 if (cap_unix(tcon->ses) &&
4f6bcec9
PS
1259 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1260 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
b8db928b
PS
1261 rc = cifs_push_posix_locks(cfile);
1262 else
1263 rc = tcon->ses->server->ops->push_mand_locks(cfile);
4f6bcec9 1264
b8db928b
PS
1265 cinode->can_cache_brlcks = false;
1266 up_write(&cinode->lock_sem);
1267 return rc;
4f6bcec9
PS
1268}
1269
/*
 * Decode a VFS struct file_lock into CIFS wire terms: sets *type to the
 * server's lock-type flags, and *lock/*unlock/*wait_flag according to
 * fl_type and fl_flags.  Pure translation — no I/O, no locking.
 */
03776f45 1270static void
04a6aa8a 1271cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
106dc538 1272 bool *wait_flag, struct TCP_Server_Info *server)
1da177e4 1273{
03776f45 1274 if (flock->fl_flags & FL_POSIX)
f96637be 1275 cifs_dbg(FYI, "Posix\n");
03776f45 1276 if (flock->fl_flags & FL_FLOCK)
f96637be 1277 cifs_dbg(FYI, "Flock\n");
03776f45 1278 if (flock->fl_flags & FL_SLEEP) {
f96637be 1279 cifs_dbg(FYI, "Blocking lock\n");
03776f45 1280 *wait_flag = true;
1da177e4 1281 }
03776f45 1282 if (flock->fl_flags & FL_ACCESS)
f96637be 1283 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
03776f45 1284 if (flock->fl_flags & FL_LEASE)
f96637be 1285 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
03776f45 1286 if (flock->fl_flags &
3d6d854a
JL
1287 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1288 FL_ACCESS | FL_LEASE | FL_CLOSE)))
f96637be 1289 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1da177e4 1290
106dc538 1291 *type = server->vals->large_lock_type;
03776f45 1292 if (flock->fl_type == F_WRLCK) {
f96637be 1293 cifs_dbg(FYI, "F_WRLCK\n");
106dc538 1294 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1295 *lock = 1;
1296 } else if (flock->fl_type == F_UNLCK) {
f96637be 1297 cifs_dbg(FYI, "F_UNLCK\n");
106dc538 1298 *type |= server->vals->unlock_lock_type;
03776f45
PS
1299 *unlock = 1;
1300 /* Check if unlock includes more than one lock range */
1301 } else if (flock->fl_type == F_RDLCK) {
f96637be 1302 cifs_dbg(FYI, "F_RDLCK\n");
106dc538 1303 *type |= server->vals->shared_lock_type;
03776f45
PS
1304 *lock = 1;
1305 } else if (flock->fl_type == F_EXLCK) {
f96637be 1306 cifs_dbg(FYI, "F_EXLCK\n");
106dc538 1307 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1308 *lock = 1;
1309 } else if (flock->fl_type == F_SHLCK) {
f96637be 1310 cifs_dbg(FYI, "F_SHLCK\n");
106dc538 1311 *type |= server->vals->shared_lock_type;
03776f45 1312 *lock = 1;
1da177e4 1313 } else
f96637be 1314 cifs_dbg(FYI, "Unknown type of lock\n");
03776f45 1315}
1da177e4 1316
/*
 * Implement F_GETLK: test whether @flock could be granted.  Posix path
 * asks the server via CIFSSMBPosixLock; mandatory path first consults
 * the local cache (cifs_lock_test) and then probes the server by
 * taking and immediately releasing the lock.  On return @flock
 * describes the conflicting lock, or fl_type == F_UNLCK if none.
 */
03776f45 1317static int
04a6aa8a 1318cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
6d5786a3 1319 bool wait_flag, bool posix_lck, unsigned int xid)
03776f45
PS
1320{
1321 int rc = 0;
1322 __u64 length = 1 + flock->fl_end - flock->fl_start;
4f6bcec9
PS
1323 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1324 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
106dc538 1325 struct TCP_Server_Info *server = tcon->ses->server;
4b4de76e 1326 __u16 netfid = cfile->fid.netfid;
f05337c6 1327
03776f45
PS
1328 if (posix_lck) {
1329 int posix_lock_type;
4f6bcec9
PS
1330
1331 rc = cifs_posix_lock_test(file, flock);
1332 if (!rc)
1333 return rc;
1334
106dc538 1335 if (type & server->vals->shared_lock_type)
03776f45
PS
1336 posix_lock_type = CIFS_RDLCK;
1337 else
1338 posix_lock_type = CIFS_WRLCK;
3d22462a
JL
1339 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1340 hash_lockowner(flock->fl_owner),
c5fd363d 1341 flock->fl_start, length, flock,
4f6bcec9 1342 posix_lock_type, wait_flag);
03776f45
PS
1343 return rc;
1344 }
1da177e4 1345

/* mandatory path: local cache answers without a round trip when it can */
fbd35aca 1346 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
85160e03
PS
1347 if (!rc)
1348 return rc;
1349
03776f45 1350 /* BB we could chain these into one lock request BB */
d39a4f71
PS
1351 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1352 1, 0, false);
03776f45 1353 if (rc == 0) {
/* probe succeeded: no conflict; undo the temporary lock */
d39a4f71
PS
1354 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1355 type, 0, 1, false);
03776f45
PS
1356 flock->fl_type = F_UNLCK;
1357 if (rc != 0)
f96637be
JP
1358 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1359 rc);
a88b4707 1360 return 0;
1da177e4 1361 }
7ee1af76 1362
106dc538 1363 if (type & server->vals->shared_lock_type) {
03776f45 1364 flock->fl_type = F_WRLCK;
a88b4707 1365 return 0;
7ee1af76
JA
1366 }
1367
/* exclusive probe failed: retry as shared to classify the conflict */
d39a4f71
PS
1368 type &= ~server->vals->exclusive_lock_type;
1369
1370 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1371 type | server->vals->shared_lock_type,
1372 1, 0, false);
03776f45 1373 if (rc == 0) {
d39a4f71
PS
1374 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1375 type | server->vals->shared_lock_type, 0, 1, false);
03776f45
PS
1376 flock->fl_type = F_RDLCK;
1377 if (rc != 0)
f96637be
JP
1378 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1379 rc);
03776f45
PS
1380 } else
1381 flock->fl_type = F_WRLCK;
1382
a88b4707 1383 return 0;
03776f45
PS
1384}
1385
/*
 * Move every entry of @source onto the tail of @dest, leaving
 * @source empty.  Caller is responsible for any needed locking.
 */
f7ba7fe6 1386void
9ee305b7
PS
1387cifs_move_llist(struct list_head *source, struct list_head *dest)
1388{
1389 struct list_head *li, *tmp;
1390 list_for_each_safe(li, tmp, source)
1391 list_move(li, dest);
1392}
1393
/*
 * Free every cifsLockInfo on @llist, first waking any waiters blocked
 * on each lock (cifs_del_lock_waiters), then unlinking and kfree-ing it.
 */
f7ba7fe6 1394void
9ee305b7
PS
1395cifs_free_llist(struct list_head *llist)
1396{
1397 struct cifsLockInfo *li, *tmp;
1398 list_for_each_entry_safe(li, tmp, llist, llist) {
1399 cifs_del_lock_waiters(li);
1400 list_del(&li->llist);
1401 kfree(li);
1402 }
1403}
1404
/*
 * Unlock every cached lock of @cfile that lies fully inside the range
 * described by @flock and belongs to the current tgid, batching ranges
 * into LOCKING_ANDX unlock requests.  Locks are parked on tmp_llist so
 * they can be restored to the file's list if the server rejects a batch.
 */
d39a4f71 1405int
6d5786a3
PS
1406cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1407 unsigned int xid)
9ee305b7
PS
1408{
1409 int rc = 0, stored_rc;
1410 int types[] = {LOCKING_ANDX_LARGE_FILES,
1411 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1412 unsigned int i;
0013fb4c 1413 unsigned int max_num, num, max_buf;
9ee305b7
PS
1414 LOCKING_ANDX_RANGE *buf, *cur;
1415 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2b0143b5 1416 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
9ee305b7
PS
1417 struct cifsLockInfo *li, *tmp;
1418 __u64 length = 1 + flock->fl_end - flock->fl_start;
1419 struct list_head tmp_llist;
1420
1421 INIT_LIST_HEAD(&tmp_llist);
1422
0013fb4c
PS
1423 /*
1424 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1425 * and check it for zero before using.
1426 */
1427 max_buf = tcon->ses->server->maxBuf;
1428 if (!max_buf)
1429 return -EINVAL;
1430
1431 max_num = (max_buf - sizeof(struct smb_hdr)) /
1432 sizeof(LOCKING_ANDX_RANGE);
4b99d39b 1433 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
9ee305b7
PS
1434 if (!buf)
1435 return -ENOMEM;
1436
1b4b55a1 1437 down_write(&cinode->lock_sem);
9ee305b7
PS
1438 for (i = 0; i < 2; i++) {
1439 cur = buf;
1440 num = 0;
f45d3416 1441 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
9ee305b7
PS
/* only locks fully contained in the unlock range qualify */
1442 if (flock->fl_start > li->offset ||
1443 (flock->fl_start + length) <
1444 (li->offset + li->length))
1445 continue;
1446 if (current->tgid != li->pid)
1447 continue;
9ee305b7
PS
1448 if (types[i] != li->type)
1449 continue;
ea319d57 1450 if (cinode->can_cache_brlcks) {
9ee305b7
PS
1451 /*
1452 * We can cache brlock requests - simply remove
fbd35aca 1453 * a lock from the file's list.
9ee305b7
PS
1454 */
1455 list_del(&li->llist);
1456 cifs_del_lock_waiters(li);
1457 kfree(li);
ea319d57 1458 continue;
9ee305b7 1459 }
ea319d57
PS
1460 cur->Pid = cpu_to_le16(li->pid);
1461 cur->LengthLow = cpu_to_le32((u32)li->length);
1462 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1463 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1464 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1465 /*
1466 * We need to save a lock here to let us add it again to
1467 * the file's list if the unlock range request fails on
1468 * the server.
1469 */
1470 list_move(&li->llist, &tmp_llist);
1471 if (++num == max_num) {
4b4de76e
PS
1472 stored_rc = cifs_lockv(xid, tcon,
1473 cfile->fid.netfid,
ea319d57
PS
1474 li->type, num, 0, buf);
1475 if (stored_rc) {
1476 /*
1477 * We failed on the unlock range
1478 * request - add all locks from the tmp
1479 * list to the head of the file's list.
1480 */
1481 cifs_move_llist(&tmp_llist,
f45d3416 1482 &cfile->llist->locks);
ea319d57
PS
1483 rc = stored_rc;
1484 } else
1485 /*
1486 * The unlock range request succeed -
1487 * free the tmp list.
1488 */
1489 cifs_free_llist(&tmp_llist);
1490 cur = buf;
1491 num = 0;
1492 } else
1493 cur++;
9ee305b7
PS
1494 }
/* final partial batch for this lock type */
1495 if (num) {
4b4de76e 1496 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
9ee305b7
PS
1497 types[i], num, 0, buf);
1498 if (stored_rc) {
f45d3416
PS
1499 cifs_move_llist(&tmp_llist,
1500 &cfile->llist->locks);
9ee305b7
PS
1501 rc = stored_rc;
1502 } else
1503 cifs_free_llist(&tmp_llist);
1504 }
1505 }
1506
1b4b55a1 1507 up_write(&cinode->lock_sem);
9ee305b7
PS
1508 kfree(buf);
1509 return rc;
1510}
1511
/*
 * Implement F_SETLK/F_SETLKW: posix path caches via posix_lock_file and
 * sends CIFSSMBPosixLock when needed; mandatory path tries the local
 * cache (cifs_lock_add_if), breaks a read lease if necessary, then asks
 * the server and only on success adds the lock to the cached list.
 * FL_POSIX locks are finally recorded with the VFS via
 * locks_lock_file_wait.
 */
03776f45 1512static int
f45d3416 1513cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
6d5786a3
PS
1514 bool wait_flag, bool posix_lck, int lock, int unlock,
1515 unsigned int xid)
03776f45
PS
1516{
1517 int rc = 0;
1518 __u64 length = 1 + flock->fl_end - flock->fl_start;
1519 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1520 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
106dc538 1521 struct TCP_Server_Info *server = tcon->ses->server;
2b0143b5 1522 struct inode *inode = d_inode(cfile->dentry);
03776f45
PS
1523
1524 if (posix_lck) {
08547b03 1525 int posix_lock_type;
4f6bcec9
PS
1526
1527 rc = cifs_posix_lock_set(file, flock);
1528 if (!rc || rc < 0)
1529 return rc;
1530
106dc538 1531 if (type & server->vals->shared_lock_type)
08547b03
SF
1532 posix_lock_type = CIFS_RDLCK;
1533 else
1534 posix_lock_type = CIFS_WRLCK;
50c2f753 1535
03776f45 1536 if (unlock == 1)
beb84dc8 1537 posix_lock_type = CIFS_UNLCK;
7ee1af76 1538
f45d3416 1539 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
3d22462a
JL
1540 hash_lockowner(flock->fl_owner),
1541 flock->fl_start, length,
f45d3416 1542 NULL, posix_lock_type, wait_flag);
03776f45
PS
1543 goto out;
1544 }
7ee1af76 1545
03776f45 1546 if (lock) {
161ebf9f
PS
1547 struct cifsLockInfo *lock;
1548
fbd35aca 1549 lock = cifs_lock_init(flock->fl_start, length, type);
161ebf9f
PS
1550 if (!lock)
1551 return -ENOMEM;
1552
/* rc == 0: satisfied from cache; rc == 1: must ask the server */
fbd35aca 1553 rc = cifs_lock_add_if(cfile, lock, wait_flag);
21cb2d90 1554 if (rc < 0) {
161ebf9f 1555 kfree(lock);
21cb2d90
PS
1556 return rc;
1557 }
1558 if (!rc)
85160e03
PS
1559 goto out;
1560
63b7d3a4
PS
1561 /*
1562 * Windows 7 server can delay breaking lease from read to None
1563 * if we set a byte-range lock on a file - break it explicitly
1564 * before sending the lock to the server to be sure the next
1565 * read won't conflict with non-overlapted locks due to
1566 * pagereading.
1567 */
18cceb6a
PS
1568 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1569 CIFS_CACHE_READ(CIFS_I(inode))) {
4f73c7d3 1570 cifs_zap_mapping(inode);
f96637be
JP
1571 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1572 inode);
18cceb6a 1573 CIFS_I(inode)->oplock = 0;
63b7d3a4
PS
1574 }
1575
d39a4f71
PS
1576 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1577 type, 1, 0, wait_flag);
161ebf9f
PS
1578 if (rc) {
1579 kfree(lock);
21cb2d90 1580 return rc;
03776f45 1581 }
161ebf9f 1582
/* server granted it: record the lock in the local cache */
fbd35aca 1583 cifs_lock_add(cfile, lock);
9ee305b7 1584 } else if (unlock)
d39a4f71 1585 rc = server->ops->mand_unlock_range(cfile, flock, xid);
03776f45 1586
03776f45 1587out:
00b8c95b 1588 if (flock->fl_flags & FL_POSIX && !rc)
4f656367 1589 rc = locks_lock_file_wait(file, flock);
03776f45
PS
1590 return rc;
1591}
1592
/*
 * VFS ->lock entry point.  Decodes the request (cifs_read_flock),
 * decides posix vs. mandatory semantics from tcon capabilities and
 * mount flags, then dispatches to cifs_getlk or cifs_setlk.
 */
1593int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1594{
1595 int rc, xid;
1596 int lock = 0, unlock = 0;
1597 bool wait_flag = false;
1598 bool posix_lck = false;
1599 struct cifs_sb_info *cifs_sb;
1600 struct cifs_tcon *tcon;
1601 struct cifsInodeInfo *cinode;
1602 struct cifsFileInfo *cfile;
1603 __u16 netfid;
04a6aa8a 1604 __u32 type;
03776f45
PS
1605
1606 rc = -EACCES;
6d5786a3 1607 xid = get_xid();
03776f45 1608
f96637be
JP
1609 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1610 cmd, flock->fl_flags, flock->fl_type,
1611 flock->fl_start, flock->fl_end);
03776f45 1612
03776f45
PS
1613 cfile = (struct cifsFileInfo *)file->private_data;
1614 tcon = tlink_tcon(cfile->tlink);
106dc538
PS
1615
1616 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1617 tcon->ses->server);
1618
7119e220 1619 cifs_sb = CIFS_FILE_SB(file);
4b4de76e 1620 netfid = cfile->fid.netfid;
496ad9aa 1621 cinode = CIFS_I(file_inode(file));
03776f45 1622
29e20f9c 1623 if (cap_unix(tcon->ses) &&
03776f45
PS
1624 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1625 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1626 posix_lck = true;
1627 /*
1628 * BB add code here to normalize offset and length to account for
1629 * negative length which we can not accept over the wire.
1630 */
1631 if (IS_GETLK(cmd)) {
4f6bcec9 1632 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
6d5786a3 1633 free_xid(xid);
03776f45
PS
1634 return rc;
1635 }
1636
1637 if (!lock && !unlock) {
1638 /*
1639 * if no lock or unlock then nothing to do since we do not
1640 * know what it is
1641 */
6d5786a3 1642 free_xid(xid);
03776f45 1643 return -EOPNOTSUPP;
7ee1af76
JA
1644 }
1645
03776f45
PS
1646 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1647 xid);
6d5786a3 1648 free_xid(xid);
1da177e4
LT
1649 return rc;
1650}
1651
597b027f
JL
1652/*
1653 * update the file size (if needed) after a write. Should be called with
1654 * the inode->i_lock held
1655 */
72432ffc 1656void
fbec9ab9
JL
/*
 * Advance the cached server-side EOF if this write extended the file;
 * never shrinks server_eof.
 */
1657cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1658 unsigned int bytes_written)
1659{
1660 loff_t end_of_write = offset + bytes_written;
1661
1662 if (end_of_write > cifsi->server_eof)
1663 cifsi->server_eof = end_of_write;
1664}
1665
ba9ad725
PS
/*
 * Synchronously write @write_size bytes from @write_data at *offset
 * through @open_file, retrying on -EAGAIN (reopening an invalidated
 * handle as needed) and chunking by the server's wp_retry_size.
 * Advances *offset, updates cached/server EOF and i_size, and returns
 * the number of bytes written or a negative errno if nothing was.
 */
1666static ssize_t
1667cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1668 size_t write_size, loff_t *offset)
1da177e4
LT
1669{
1670 int rc = 0;
1671 unsigned int bytes_written = 0;
1672 unsigned int total_written;
1673 struct cifs_sb_info *cifs_sb;
ba9ad725
PS
1674 struct cifs_tcon *tcon;
1675 struct TCP_Server_Info *server;
6d5786a3 1676 unsigned int xid;
7da4b49a 1677 struct dentry *dentry = open_file->dentry;
2b0143b5 1678 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
fa2989f4 1679 struct cifs_io_parms io_parms;
1da177e4 1680
7da4b49a 1681 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 1682
35c265e0
AV
1683 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1684 write_size, *offset, dentry);
1da177e4 1685
ba9ad725
PS
1686 tcon = tlink_tcon(open_file->tlink);
1687 server = tcon->ses->server;
1688
1689 if (!server->ops->sync_write)
1690 return -ENOSYS;
50c2f753 1691
6d5786a3 1692 xid = get_xid();
1da177e4 1693
1da177e4
LT
1694 for (total_written = 0; write_size > total_written;
1695 total_written += bytes_written) {
1696 rc = -EAGAIN;
1697 while (rc == -EAGAIN) {
ca83ce3d
JL
1698 struct kvec iov[2];
1699 unsigned int len;
1700
1da177e4 1701 if (open_file->invalidHandle) {
1da177e4
LT
1702 /* we could deadlock if we called
1703 filemap_fdatawait from here so tell
fb8c4b14 1704 reopen_file not to flush data to
1da177e4 1705 server now */
15886177 1706 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
1707 if (rc != 0)
1708 break;
1709 }
ca83ce3d 1710
/* cap each request at the server's retry-safe write size */
2b0143b5 1711 len = min(server->ops->wp_retry_size(d_inode(dentry)),
cb7e9eab 1712 (unsigned int)write_size - total_written);
ca83ce3d
JL
1713 /* iov[0] is reserved for smb header */
1714 iov[1].iov_base = (char *)write_data + total_written;
1715 iov[1].iov_len = len;
fa2989f4 1716 io_parms.pid = pid;
ba9ad725
PS
1717 io_parms.tcon = tcon;
1718 io_parms.offset = *offset;
fa2989f4 1719 io_parms.length = len;
db8b631d
SF
1720 rc = server->ops->sync_write(xid, &open_file->fid,
1721 &io_parms, &bytes_written, iov, 1);
1da177e4
LT
1722 }
1723 if (rc || (bytes_written == 0)) {
/* partial success returns what we wrote so far; else the error */
1724 if (total_written)
1725 break;
1726 else {
6d5786a3 1727 free_xid(xid);
1da177e4
LT
1728 return rc;
1729 }
fbec9ab9 1730 } else {
2b0143b5 1731 spin_lock(&d_inode(dentry)->i_lock);
ba9ad725 1732 cifs_update_eof(cifsi, *offset, bytes_written);
2b0143b5 1733 spin_unlock(&d_inode(dentry)->i_lock);
ba9ad725 1734 *offset += bytes_written;
fbec9ab9 1735 }
1da177e4
LT
1736 }
1737
ba9ad725 1738 cifs_stats_bytes_written(tcon, total_written);
1da177e4 1739

7da4b49a 1740 if (total_written > 0) {
2b0143b5
DH
1741 spin_lock(&d_inode(dentry)->i_lock);
1742 if (*offset > d_inode(dentry)->i_size)
1743 i_size_write(d_inode(dentry), *offset);
1744 spin_unlock(&d_inode(dentry)->i_lock);
1da177e4 1745 }
2b0143b5 1746 mark_inode_dirty_sync(d_inode(dentry));
6d5786a3 1747 free_xid(xid);
1da177e4
LT
1748 return total_written;
1749}
1750
6508d904
JL
/*
 * Return the first valid readable open handle on this inode (reference
 * taken via cifsFileInfo_get — caller must put it), or NULL.  On
 * multiuser mounts @fsuid_only restricts the search to the caller's
 * fsuid; on other mounts the flag is ignored.
 */
1751struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1752 bool fsuid_only)
630f3f0c
SF
1753{
1754 struct cifsFileInfo *open_file = NULL;
6508d904 1755 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
3afca265 1756 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
6508d904
JL
1757
1758 /* only filter by fsuid on multiuser mounts */
1759 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1760 fsuid_only = false;
630f3f0c 1761
3afca265 1762 spin_lock(&tcon->open_file_lock);
630f3f0c
SF
1763 /* we could simply get the first_list_entry since write-only entries
1764 are always at the end of the list but since the first entry might
1765 have a close pending, we go through the whole list */
1766 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
fef59fd7 1767 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
6508d904 1768 continue;
2e396b83 1769 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1770 if (!open_file->invalidHandle) {
1771 /* found a good file */
1772 /* lock it so it will not be closed on us */
3afca265
SF
1773 cifsFileInfo_get(open_file);
1774 spin_unlock(&tcon->open_file_lock);
630f3f0c
SF
1775 return open_file;
1776 } /* else might as well continue, and look for
1777 another, or simply have the caller reopen it
1778 again rather than trying to fix this handle */
1779 } else /* write only file */
1780 break; /* write only files are last so must be done */
1781 }
3afca265 1782 spin_unlock(&tcon->open_file_lock);
630f3f0c
SF
1783 return NULL;
1784}
630f3f0c 1785
6508d904
JL
/*
 * Return a referenced writable open handle on this inode, or NULL.
 * Prefers a valid handle owned by the current tgid; falls back to any
 * valid handle, and as a last resort tries to reopen an invalidated
 * one (up to MAX_REOPEN_ATT attempts, dropping/retaking
 * open_file_lock around the reopen).  Caller must cifsFileInfo_put.
 */
1786struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1787 bool fsuid_only)
6148a742 1788{
2c0c2a08 1789 struct cifsFileInfo *open_file, *inv_file = NULL;
d3892294 1790 struct cifs_sb_info *cifs_sb;
3afca265 1791 struct cifs_tcon *tcon;
2846d386 1792 bool any_available = false;
dd99cd80 1793 int rc;
2c0c2a08 1794 unsigned int refind = 0;
6148a742 1795
60808233
SF
1796 /* Having a null inode here (because mapping->host was set to zero by
1797 the VFS or MM) should not happen but we had reports of an oops (due to
1798 it being zero) during stress testcases so we need to check for it */
1799
fb8c4b14 1800 if (cifs_inode == NULL) {
f96637be 1801 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
60808233
SF
1802 dump_stack();
1803 return NULL;
1804 }
1805
d3892294 1806 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
3afca265 1807 tcon = cifs_sb_master_tcon(cifs_sb);
d3892294 1808
6508d904
JL
1809 /* only filter by fsuid on multiuser mounts */
1810 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1811 fsuid_only = false;
1812
3afca265 1813 spin_lock(&tcon->open_file_lock);
9b22b0b7 1814refind_writable:
2c0c2a08 1815 if (refind > MAX_REOPEN_ATT) {
3afca265 1816 spin_unlock(&tcon->open_file_lock);
2c0c2a08
SP
1817 return NULL;
1818 }
6148a742 1819 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1820 if (!any_available && open_file->pid != current->tgid)
1821 continue;
fef59fd7 1822 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
6148a742 1823 continue;
2e396b83 1824 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
9b22b0b7
SF
1825 if (!open_file->invalidHandle) {
1826 /* found a good writable file */
3afca265
SF
1827 cifsFileInfo_get(open_file);
1828 spin_unlock(&tcon->open_file_lock);
9b22b0b7 1829 return open_file;
2c0c2a08
SP
1830 } else {
/* remember the first invalidated candidate for a later reopen */
1831 if (!inv_file)
1832 inv_file = open_file;
9b22b0b7 1833 }
6148a742
SF
1834 }
1835 }
2846d386
JL
1836 /* couldn't find usable FH with same pid, try any available */
1837 if (!any_available) {
1838 any_available = true;
1839 goto refind_writable;
1840 }
2c0c2a08
SP
1841
1842 if (inv_file) {
1843 any_available = false;
3afca265 1844 cifsFileInfo_get(inv_file);
2c0c2a08
SP
1845 }
1846
3afca265 1847 spin_unlock(&tcon->open_file_lock);
2c0c2a08
SP
1848
1849 if (inv_file) {
/* reopen outside the spinlock; it may sleep on the network */
1850 rc = cifs_reopen_file(inv_file, false);
1851 if (!rc)
1852 return inv_file;
1853 else {
3afca265 1854 spin_lock(&tcon->open_file_lock);
2c0c2a08
SP
1855 list_move_tail(&inv_file->flist,
1856 &cifs_inode->openFileList);
3afca265 1857 spin_unlock(&tcon->open_file_lock);
2c0c2a08 1858 cifsFileInfo_put(inv_file);
2c0c2a08 1859 ++refind;
e1e9bda2 1860 inv_file = NULL;
3afca265 1861 spin_lock(&tcon->open_file_lock);
2c0c2a08
SP
1862 goto refind_writable;
1863 }
1864 }
1865
6148a742
SF
1866 return NULL;
1867}
1868
1da177e4
LT
/*
 * Write the byte range [from, to) of a single cached page back to the
 * server using any available writable handle for the inode.
 *
 * Returns 0 on success (or when racing with truncate), a negative errno
 * on failure. Used by cifs_writepage_locked() for one-page-at-a-time
 * writeback.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* reject an impossible range before touching the server */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1922
90ac1387
PS
1923static struct cifs_writedata *
1924wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1925 pgoff_t end, pgoff_t *index,
1926 unsigned int *found_pages)
1927{
1928 unsigned int nr_pages;
1929 struct page **pages;
1930 struct cifs_writedata *wdata;
1931
1932 wdata = cifs_writedata_alloc((unsigned int)tofind,
1933 cifs_writev_complete);
1934 if (!wdata)
1935 return NULL;
1936
1937 /*
1938 * find_get_pages_tag seems to return a max of 256 on each
1939 * iteration, so we must call it several times in order to
1940 * fill the array or the wsize is effectively limited to
ea1754a0 1941 * 256 * PAGE_SIZE.
90ac1387
PS
1942 */
1943 *found_pages = 0;
1944 pages = wdata->pages;
1945 do {
1946 nr_pages = find_get_pages_tag(mapping, index,
1947 PAGECACHE_TAG_DIRTY, tofind,
1948 pages);
1949 *found_pages += nr_pages;
1950 tofind -= nr_pages;
1951 pages += nr_pages;
1952 } while (nr_pages && tofind && *index <= end);
1953
1954 return wdata;
1955}
1956
7e48ff82
PS
/*
 * Lock and prepare a run of consecutive dirty pages from @wdata->pages
 * for writeback. Stops at the first page that is not consecutive, not
 * dirty, past @end, or beyond EOF. Pages that are kept have their dirty
 * bit cleared and writeback bit set; unused pages are released.
 *
 * Returns the number of pages (starting at index 0) ready to be sent.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		/* block for the first page; never block mid-batch */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2036
619aa48e
PS
/*
 * Fill in the remaining fields of @wdata (offsets, sizes, file handle)
 * and issue the asynchronous write to the server. All @nr_pages pages
 * are unlocked before returning, whether or not the send succeeded.
 *
 * Returns 0 if the async write was dispatched, -EBADF if no writable
 * handle could be found, or the error from ->async_writev.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* last page may be partial: clamp its length to EOF */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	/* drop any stale handle from a previous attempt before refinding */
	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2071
/*
 * address_space_operations ->writepages for cifs: gather runs of dirty
 * pages, reserve server credits sized by wsize, and dispatch them via
 * async writes. Falls back to generic_writepages() when wsize is below
 * one page. On -EAGAIN the batch is redirtied and (for WB_SYNC_ALL)
 * retried from the saved index.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* blocks until enough credits for a wsize-sized write */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* for data-integrity writeback, retry the same range */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
1da177e4 2184
9ad1506b
PS
/*
 * Write back a single locked page via cifs_partialpagewrite(). The page
 * remains locked on return (cifs_writepage() unlocks it). For
 * WB_SYNC_ALL, -EAGAIN from the server is retried in place; otherwise
 * the page is redirtied for a later pass.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2223
9ad1506b
PS
/* ->writepage entry point: do the locked write, then release the page lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}
2230
d9414774
NP
/*
 * address_space_operations ->write_end for cifs. If the page is not up
 * to date, the copied bytes are written straight to the server via
 * cifs_write(); otherwise the page is simply dirtied for later
 * writeback. Updates i_size under i_lock when the write extends the
 * file, then unlocks and releases the page.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* with rwpidforward the pid of the opener, not the writer, is sent */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2291
02c24a82
JB
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], zap
 * the page cache if we no longer hold a read oplock/lease (so stale
 * cached data cannot be served), then ask the server to flush the file
 * handle unless the nostrictsync mount option is set.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2334
/*
 * Plain (non-strict) fsync: flush dirty pages in [start, end] and ask
 * the server to flush the file handle, unless the nostrictsync mount
 * option suppresses the server-side flush. Unlike cifs_strict_fsync()
 * this never invalidates the page cache.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2368
1da177e4
LT
2369/*
2370 * As file closes, flush all cached write data for this inode checking
2371 * for write behind errors.
2372 */
75e1fcc0 2373int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2374{
496ad9aa 2375 struct inode *inode = file_inode(file);
1da177e4
LT
2376 int rc = 0;
2377
eb4b756b 2378 if (file->f_mode & FMODE_WRITE)
d3f1322a 2379 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2380
f96637be 2381 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
1da177e4
LT
2382
2383 return rc;
2384}
2385
72432ffc
PS
2386static int
2387cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2388{
2389 int rc = 0;
2390 unsigned long i;
2391
2392 for (i = 0; i < num_pages; i++) {
e94f7ba1 2393 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
72432ffc
PS
2394 if (!pages[i]) {
2395 /*
2396 * save number of pages we have already allocated and
2397 * return with ENOMEM error
2398 */
2399 num_pages = i;
2400 rc = -ENOMEM;
e94f7ba1 2401 break;
72432ffc
PS
2402 }
2403 }
2404
e94f7ba1
JL
2405 if (rc) {
2406 for (i = 0; i < num_pages; i++)
2407 put_page(pages[i]);
2408 }
72432ffc
PS
2409 return rc;
2410}
2411
2412static inline
2413size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2414{
2415 size_t num_pages;
2416 size_t clen;
2417
2418 clen = min_t(const size_t, len, wsize);
a7103b99 2419 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2420
2421 if (cur_len)
2422 *cur_len = clen;
2423
2424 return num_pages;
2425}
2426
da82f7e7 2427static void
4a5c80d7 2428cifs_uncached_writedata_release(struct kref *refcount)
da82f7e7
JL
2429{
2430 int i;
4a5c80d7
SF
2431 struct cifs_writedata *wdata = container_of(refcount,
2432 struct cifs_writedata, refcount);
2433
2434 for (i = 0; i < wdata->nr_pages; i++)
2435 put_page(wdata->pages[i]);
2436 cifs_writedata_release(refcount);
2437}
2438
/*
 * Work-queue completion for an uncached async write: advance the cached
 * server EOF and, if the server EOF now exceeds i_size, grow i_size —
 * both under i_lock. Then signal waiters and drop the wdata reference.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2457
/*
 * Copy up to *@len bytes from the user iterator @from into the pages of
 * @wdata. On return *@len holds the number of bytes actually copied and
 * *@num_pages the number of pages used. Returns -EFAULT if nothing at
 * all could be copied (e.g. a bogus iovec address), 0 otherwise.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2499
/*
 * Split an uncached write of @len bytes at @offset (data taken from
 * @from) into wsize-bounded chunks, copy each chunk into freshly
 * allocated pages, and dispatch it as an async write. Each successfully
 * sent wdata is appended to @wdata_list for the caller to wait on. On
 * -EAGAIN the iterator is rewound to the chunk start and the chunk is
 * resent. Returns 0, or the first non-retryable error.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	/* with rwpidforward, send the pid of the original opener */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		/* reopen a stale (invalidated) handle before sending */
		if (!wdata->cfile->invalidHandle ||
		    !cifs_reopen_file(wdata->cfile, false))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/* rewind the iterator to this chunk's start */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2595
/*
 * Uncached (O_DIRECT-style) write entry point: fan the iovec out into
 * async writes via cifs_write_from_iter(), then collect the replies in
 * offset order, resending any chunk that failed with -EAGAIN. Returns
 * the total number of bytes written (advancing iocb->ki_pos), or an
 * error when nothing was written.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = saved_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				/* position the iterator at this chunk's data */
				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	/* cached pages are now stale relative to the server copy */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}
2689
/*
 * Cached write used when we hold a write oplock/lease: take lock_sem
 * (shared) to keep the brlock list stable, check for a conflicting
 * mandatory lock on the target range, and if none, go through the
 * generic page-cache write path. Returns -EACCES on a lock conflict.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	inode_lock(inode);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	inode_unlock(inode);

	/* sync after dropping the inode lock but before releasing lock_sem */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	up_read(&cinode->lock_sem);
	return rc;
}
2725
/*
 * Strict-cache write dispatcher. With a write oplock/lease: use the
 * generic cached path (or cifs_writev() when POSIX brlocks don't
 * apply). Without one: write uncached via cifs_user_writev() and, if we
 * still hold a read oplock, zap the cache so stale data isn't read
 * back. Serialized against oplock breaks via cifs_get/put_writer().
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2773
0471ca3f 2774static struct cifs_readdata *
f4e49cd2 2775cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
0471ca3f
JL
2776{
2777 struct cifs_readdata *rdata;
f4e49cd2 2778
c5fab6f4
JL
2779 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2780 GFP_KERNEL);
0471ca3f 2781 if (rdata != NULL) {
6993f74a 2782 kref_init(&rdata->refcount);
1c892549
JL
2783 INIT_LIST_HEAD(&rdata->list);
2784 init_completion(&rdata->done);
0471ca3f 2785 INIT_WORK(&rdata->work, complete);
0471ca3f 2786 }
f4e49cd2 2787
0471ca3f
JL
2788 return rdata;
2789}
2790
6993f74a
JL
2791void
2792cifs_readdata_release(struct kref *refcount)
0471ca3f 2793{
6993f74a
JL
2794 struct cifs_readdata *rdata = container_of(refcount,
2795 struct cifs_readdata, refcount);
2796
2797 if (rdata->cfile)
2798 cifsFileInfo_put(rdata->cfile);
2799
0471ca3f
JL
2800 kfree(rdata);
2801}
2802
1c892549 2803static int
c5fab6f4 2804cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
1c892549
JL
2805{
2806 int rc = 0;
c5fab6f4 2807 struct page *page;
1c892549
JL
2808 unsigned int i;
2809
c5fab6f4 2810 for (i = 0; i < nr_pages; i++) {
1c892549
JL
2811 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2812 if (!page) {
2813 rc = -ENOMEM;
2814 break;
2815 }
c5fab6f4 2816 rdata->pages[i] = page;
1c892549
JL
2817 }
2818
2819 if (rc) {
c5fab6f4
JL
2820 for (i = 0; i < nr_pages; i++) {
2821 put_page(rdata->pages[i]);
2822 rdata->pages[i] = NULL;
1c892549
JL
2823 }
2824 }
2825 return rc;
2826}
2827
2828static void
2829cifs_uncached_readdata_release(struct kref *refcount)
2830{
1c892549
JL
2831 struct cifs_readdata *rdata = container_of(refcount,
2832 struct cifs_readdata, refcount);
c5fab6f4 2833 unsigned int i;
1c892549 2834
c5fab6f4
JL
2835 for (i = 0; i < rdata->nr_pages; i++) {
2836 put_page(rdata->pages[i]);
2837 rdata->pages[i] = NULL;
1c892549
JL
2838 }
2839 cifs_readdata_release(refcount);
2840}
2841
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 when all of rdata->got_bytes was copied, -EFAULT otherwise.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		/* short copy with room left in the iter: fault — stop */
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
2867
2868static void
2869cifs_uncached_readv_complete(struct work_struct *work)
2870{
2871 struct cifs_readdata *rdata = container_of(work,
2872 struct cifs_readdata, work);
1c892549
JL
2873
2874 complete(&rdata->done);
2875 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2876}
2877
2878static int
8321fec4
JL
2879cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2880 struct cifs_readdata *rdata, unsigned int len)
1c892549 2881{
b3160aeb 2882 int result = 0;
c5fab6f4
JL
2883 unsigned int i;
2884 unsigned int nr_pages = rdata->nr_pages;
1c892549 2885
b3160aeb 2886 rdata->got_bytes = 0;
8321fec4 2887 rdata->tailsz = PAGE_SIZE;
c5fab6f4
JL
2888 for (i = 0; i < nr_pages; i++) {
2889 struct page *page = rdata->pages[i];
71335664 2890 size_t n;
c5fab6f4 2891
71335664 2892 if (len <= 0) {
1c892549 2893 /* no need to hold page hostage */
c5fab6f4
JL
2894 rdata->pages[i] = NULL;
2895 rdata->nr_pages--;
1c892549 2896 put_page(page);
8321fec4 2897 continue;
1c892549 2898 }
71335664
AV
2899 n = len;
2900 if (len >= PAGE_SIZE) {
2901 /* enough data to fill the page */
2902 n = PAGE_SIZE;
2903 len -= n;
2904 } else {
2905 zero_user(page, len, PAGE_SIZE - len);
2906 rdata->tailsz = len;
2907 len = 0;
2908 }
2909 result = cifs_read_page_from_socket(server, page, n);
8321fec4
JL
2910 if (result < 0)
2911 break;
2912
b3160aeb 2913 rdata->got_bytes += result;
1c892549
JL
2914 }
2915
b3160aeb
PS
2916 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
2917 rdata->got_bytes : result;
1c892549
JL
2918}
2919
0ada36b2
PS
2920static int
2921cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
2922 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
1da177e4 2923{
0ada36b2 2924 struct cifs_readdata *rdata;
bed9da02 2925 unsigned int npages, rsize, credits;
0ada36b2
PS
2926 size_t cur_len;
2927 int rc;
1c892549 2928 pid_t pid;
25f40259 2929 struct TCP_Server_Info *server;
a70307ee 2930
25f40259 2931 server = tlink_tcon(open_file->tlink)->ses->server;
fc9c5966 2932
d4ffff1f
PS
2933 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2934 pid = open_file->pid;
2935 else
2936 pid = current->tgid;
2937
1c892549 2938 do {
bed9da02
PS
2939 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
2940 &rsize, &credits);
2941 if (rc)
2942 break;
2943
2944 cur_len = min_t(const size_t, len, rsize);
1c892549 2945 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
a70307ee 2946
1c892549
JL
2947 /* allocate a readdata struct */
2948 rdata = cifs_readdata_alloc(npages,
2949 cifs_uncached_readv_complete);
2950 if (!rdata) {
bed9da02 2951 add_credits_and_wake_if(server, credits, 0);
1c892549 2952 rc = -ENOMEM;
bae9f746 2953 break;
1da177e4 2954 }
a70307ee 2955
c5fab6f4 2956 rc = cifs_read_allocate_pages(rdata, npages);
1c892549
JL
2957 if (rc)
2958 goto error;
2959
2960 rdata->cfile = cifsFileInfo_get(open_file);
c5fab6f4 2961 rdata->nr_pages = npages;
1c892549
JL
2962 rdata->offset = offset;
2963 rdata->bytes = cur_len;
2964 rdata->pid = pid;
8321fec4
JL
2965 rdata->pagesz = PAGE_SIZE;
2966 rdata->read_into_pages = cifs_uncached_read_into_pages;
bed9da02 2967 rdata->credits = credits;
1c892549 2968
25f40259
PS
2969 if (!rdata->cfile->invalidHandle ||
2970 !cifs_reopen_file(rdata->cfile, true))
2971 rc = server->ops->async_readv(rdata);
1c892549
JL
2972error:
2973 if (rc) {
bed9da02 2974 add_credits_and_wake_if(server, rdata->credits, 0);
1c892549
JL
2975 kref_put(&rdata->refcount,
2976 cifs_uncached_readdata_release);
25f40259
PS
2977 if (rc == -EAGAIN)
2978 continue;
1c892549
JL
2979 break;
2980 }
2981
0ada36b2 2982 list_add_tail(&rdata->list, rdata_list);
1c892549
JL
2983 offset += cur_len;
2984 len -= cur_len;
2985 } while (len > 0);
2986
0ada36b2
PS
2987 return rc;
2988}
2989
2990ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
2991{
2992 struct file *file = iocb->ki_filp;
2993 ssize_t rc;
2994 size_t len;
2995 ssize_t total_read = 0;
2996 loff_t offset = iocb->ki_pos;
2997 struct cifs_sb_info *cifs_sb;
2998 struct cifs_tcon *tcon;
2999 struct cifsFileInfo *open_file;
3000 struct cifs_readdata *rdata, *tmp;
3001 struct list_head rdata_list;
3002
3003 len = iov_iter_count(to);
3004 if (!len)
3005 return 0;
3006
3007 INIT_LIST_HEAD(&rdata_list);
7119e220 3008 cifs_sb = CIFS_FILE_SB(file);
0ada36b2
PS
3009 open_file = file->private_data;
3010 tcon = tlink_tcon(open_file->tlink);
3011
3012 if (!tcon->ses->server->ops->async_readv)
3013 return -ENOSYS;
3014
3015 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3016 cifs_dbg(FYI, "attempting read on write only file instance\n");
3017
3018 rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3019
1c892549
JL
3020 /* if at least one read request send succeeded, then reset rc */
3021 if (!list_empty(&rdata_list))
3022 rc = 0;
3023
e6a7bcb4 3024 len = iov_iter_count(to);
1c892549 3025 /* the loop below should proceed in the order of increasing offsets */
25f40259 3026again:
1c892549
JL
3027 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3028 if (!rc) {
1c892549
JL
3029 /* FIXME: freezable sleep too? */
3030 rc = wait_for_completion_killable(&rdata->done);
3031 if (rc)
3032 rc = -EINTR;
fb8a3e52 3033 else if (rdata->result == -EAGAIN) {
74027f4a 3034 /* resend call if it's a retryable error */
fb8a3e52 3035 struct list_head tmp_list;
d913ed17 3036 unsigned int got_bytes = rdata->got_bytes;
25f40259 3037
fb8a3e52
PS
3038 list_del_init(&rdata->list);
3039 INIT_LIST_HEAD(&tmp_list);
25f40259 3040
d913ed17
PS
3041 /*
3042 * Got a part of data and then reconnect has
3043 * happened -- fill the buffer and continue
3044 * reading.
3045 */
3046 if (got_bytes && got_bytes < rdata->bytes) {
3047 rc = cifs_readdata_to_iov(rdata, to);
3048 if (rc) {
3049 kref_put(&rdata->refcount,
3050 cifs_uncached_readdata_release);
3051 continue;
3052 }
74027f4a 3053 }
d913ed17
PS
3054
3055 rc = cifs_send_async_read(
3056 rdata->offset + got_bytes,
3057 rdata->bytes - got_bytes,
3058 rdata->cfile, cifs_sb,
3059 &tmp_list);
25f40259 3060
fb8a3e52 3061 list_splice(&tmp_list, &rdata_list);
25f40259 3062
fb8a3e52
PS
3063 kref_put(&rdata->refcount,
3064 cifs_uncached_readdata_release);
3065 goto again;
3066 } else if (rdata->result)
3067 rc = rdata->result;
3068 else
e6a7bcb4 3069 rc = cifs_readdata_to_iov(rdata, to);
1c892549 3070
2e8a05d8
PS
3071 /* if there was a short read -- discard anything left */
3072 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3073 rc = -ENODATA;
1da177e4 3074 }
1c892549
JL
3075 list_del_init(&rdata->list);
3076 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
1da177e4 3077 }
a70307ee 3078
e6a7bcb4 3079 total_read = len - iov_iter_count(to);
7f25bba8 3080
1c892549 3081 cifs_stats_bytes_read(tcon, total_read);
1c892549 3082
09a4707e
PS
3083 /* mask nodata case */
3084 if (rc == -ENODATA)
3085 rc = 0;
3086
0165e810 3087 if (total_read) {
e6a7bcb4 3088 iocb->ki_pos += total_read;
0165e810
AV
3089 return total_read;
3090 }
3091 return rc;
a70307ee
PS
3092}
3093
579f9053 3094ssize_t
e6a7bcb4 3095cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
a70307ee 3096{
496ad9aa 3097 struct inode *inode = file_inode(iocb->ki_filp);
579f9053
PS
3098 struct cifsInodeInfo *cinode = CIFS_I(inode);
3099 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3100 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3101 iocb->ki_filp->private_data;
3102 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3103 int rc = -EACCES;
a70307ee
PS
3104
3105 /*
3106 * In strict cache mode we need to read from the server all the time
3107 * if we don't have level II oplock because the server can delay mtime
3108 * change - so we can't make a decision about inode invalidating.
3109 * And we can also fail with pagereading if there are mandatory locks
3110 * on pages affected by this read but not on the region from pos to
3111 * pos+len-1.
3112 */
18cceb6a 3113 if (!CIFS_CACHE_READ(cinode))
e6a7bcb4 3114 return cifs_user_readv(iocb, to);
a70307ee 3115
579f9053
PS
3116 if (cap_unix(tcon->ses) &&
3117 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3118 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
e6a7bcb4 3119 return generic_file_read_iter(iocb, to);
579f9053
PS
3120
3121 /*
3122 * We need to hold the sem to be sure nobody modifies lock list
3123 * with a brlock that prevents reading.
3124 */
3125 down_read(&cinode->lock_sem);
e6a7bcb4 3126 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
579f9053 3127 tcon->ses->server->vals->shared_lock_type,
081c0414 3128 NULL, CIFS_READ_OP))
e6a7bcb4 3129 rc = generic_file_read_iter(iocb, to);
579f9053
PS
3130 up_read(&cinode->lock_sem);
3131 return rc;
a70307ee 3132}
1da177e4 3133
f9c6e234
PS
3134static ssize_t
3135cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
1da177e4
LT
3136{
3137 int rc = -EACCES;
3138 unsigned int bytes_read = 0;
3139 unsigned int total_read;
3140 unsigned int current_read_size;
5eba8ab3 3141 unsigned int rsize;
1da177e4 3142 struct cifs_sb_info *cifs_sb;
29e20f9c 3143 struct cifs_tcon *tcon;
f9c6e234 3144 struct TCP_Server_Info *server;
6d5786a3 3145 unsigned int xid;
f9c6e234 3146 char *cur_offset;
1da177e4 3147 struct cifsFileInfo *open_file;
d4ffff1f 3148 struct cifs_io_parms io_parms;
ec637e3f 3149 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 3150 __u32 pid;
1da177e4 3151
6d5786a3 3152 xid = get_xid();
7119e220 3153 cifs_sb = CIFS_FILE_SB(file);
1da177e4 3154
5eba8ab3
JL
3155 /* FIXME: set up handlers for larger reads and/or convert to async */
3156 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3157
1da177e4 3158 if (file->private_data == NULL) {
0f3bc09e 3159 rc = -EBADF;
6d5786a3 3160 free_xid(xid);
0f3bc09e 3161 return rc;
1da177e4 3162 }
c21dfb69 3163 open_file = file->private_data;
29e20f9c 3164 tcon = tlink_tcon(open_file->tlink);
f9c6e234
PS
3165 server = tcon->ses->server;
3166
3167 if (!server->ops->sync_read) {
3168 free_xid(xid);
3169 return -ENOSYS;
3170 }
1da177e4 3171
d4ffff1f
PS
3172 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3173 pid = open_file->pid;
3174 else
3175 pid = current->tgid;
3176
1da177e4 3177 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
f96637be 3178 cifs_dbg(FYI, "attempting read on write only file instance\n");
1da177e4 3179
f9c6e234
PS
3180 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3181 total_read += bytes_read, cur_offset += bytes_read) {
e374d90f
PS
3182 do {
3183 current_read_size = min_t(uint, read_size - total_read,
3184 rsize);
3185 /*
3186 * For windows me and 9x we do not want to request more
3187 * than it negotiated since it will refuse the read
3188 * then.
3189 */
3190 if ((tcon->ses) && !(tcon->ses->capabilities &
29e20f9c 3191 tcon->ses->server->vals->cap_large_files)) {
e374d90f
PS
3192 current_read_size = min_t(uint,
3193 current_read_size, CIFSMaxBufSize);
3194 }
cdff08e7 3195 if (open_file->invalidHandle) {
15886177 3196 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
3197 if (rc != 0)
3198 break;
3199 }
d4ffff1f 3200 io_parms.pid = pid;
29e20f9c 3201 io_parms.tcon = tcon;
f9c6e234 3202 io_parms.offset = *offset;
d4ffff1f 3203 io_parms.length = current_read_size;
db8b631d 3204 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
f9c6e234
PS
3205 &bytes_read, &cur_offset,
3206 &buf_type);
e374d90f
PS
3207 } while (rc == -EAGAIN);
3208
1da177e4
LT
3209 if (rc || (bytes_read == 0)) {
3210 if (total_read) {
3211 break;
3212 } else {
6d5786a3 3213 free_xid(xid);
1da177e4
LT
3214 return rc;
3215 }
3216 } else {
29e20f9c 3217 cifs_stats_bytes_read(tcon, total_read);
f9c6e234 3218 *offset += bytes_read;
1da177e4
LT
3219 }
3220 }
6d5786a3 3221 free_xid(xid);
1da177e4
LT
3222 return total_read;
3223}
3224
ca83ce3d
JL
3225/*
3226 * If the page is mmap'ed into a process' page tables, then we need to make
3227 * sure that it doesn't change while being written back.
3228 */
3229static int
3230cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3231{
3232 struct page *page = vmf->page;
3233
3234 lock_page(page);
3235 return VM_FAULT_LOCKED;
3236}
3237
7cbea8dc 3238static const struct vm_operations_struct cifs_file_vm_ops = {
ca83ce3d 3239 .fault = filemap_fault,
f1820361 3240 .map_pages = filemap_map_pages,
ca83ce3d
JL
3241 .page_mkwrite = cifs_page_mkwrite,
3242};
3243
7a6a19b1
PS
3244int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3245{
3246 int rc, xid;
496ad9aa 3247 struct inode *inode = file_inode(file);
7a6a19b1 3248
6d5786a3 3249 xid = get_xid();
7a6a19b1 3250
18cceb6a 3251 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
4f73c7d3 3252 rc = cifs_zap_mapping(inode);
6feb9891
PS
3253 if (rc)
3254 return rc;
3255 }
7a6a19b1
PS
3256
3257 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3258 if (rc == 0)
3259 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3260 free_xid(xid);
7a6a19b1
PS
3261 return rc;
3262}
3263
1da177e4
LT
3264int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3265{
1da177e4
LT
3266 int rc, xid;
3267
6d5786a3 3268 xid = get_xid();
abab095d 3269 rc = cifs_revalidate_file(file);
1da177e4 3270 if (rc) {
f96637be
JP
3271 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3272 rc);
6d5786a3 3273 free_xid(xid);
1da177e4
LT
3274 return rc;
3275 }
3276 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3277 if (rc == 0)
3278 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3279 free_xid(xid);
1da177e4
LT
3280 return rc;
3281}
3282
0471ca3f
JL
3283static void
3284cifs_readv_complete(struct work_struct *work)
3285{
b770ddfa 3286 unsigned int i, got_bytes;
0471ca3f
JL
3287 struct cifs_readdata *rdata = container_of(work,
3288 struct cifs_readdata, work);
0471ca3f 3289
b770ddfa 3290 got_bytes = rdata->got_bytes;
c5fab6f4
JL
3291 for (i = 0; i < rdata->nr_pages; i++) {
3292 struct page *page = rdata->pages[i];
3293
0471ca3f
JL
3294 lru_cache_add_file(page);
3295
b770ddfa
PS
3296 if (rdata->result == 0 ||
3297 (rdata->result == -EAGAIN && got_bytes)) {
0471ca3f
JL
3298 flush_dcache_page(page);
3299 SetPageUptodate(page);
3300 }
3301
3302 unlock_page(page);
3303
b770ddfa
PS
3304 if (rdata->result == 0 ||
3305 (rdata->result == -EAGAIN && got_bytes))
0471ca3f
JL
3306 cifs_readpage_to_fscache(rdata->mapping->host, page);
3307
09cbfeaf 3308 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
b770ddfa 3309
09cbfeaf 3310 put_page(page);
c5fab6f4 3311 rdata->pages[i] = NULL;
0471ca3f 3312 }
6993f74a 3313 kref_put(&rdata->refcount, cifs_readdata_release);
0471ca3f
JL
3314}
3315
8d5ce4d2 3316static int
8321fec4
JL
3317cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3318 struct cifs_readdata *rdata, unsigned int len)
8d5ce4d2 3319{
b3160aeb 3320 int result = 0;
c5fab6f4 3321 unsigned int i;
8d5ce4d2
JL
3322 u64 eof;
3323 pgoff_t eof_index;
c5fab6f4 3324 unsigned int nr_pages = rdata->nr_pages;
8d5ce4d2
JL
3325
3326 /* determine the eof that the server (probably) has */
3327 eof = CIFS_I(rdata->mapping->host)->server_eof;
09cbfeaf 3328 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
f96637be 3329 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
8d5ce4d2 3330
b3160aeb 3331 rdata->got_bytes = 0;
09cbfeaf 3332 rdata->tailsz = PAGE_SIZE;
c5fab6f4
JL
3333 for (i = 0; i < nr_pages; i++) {
3334 struct page *page = rdata->pages[i];
442c9ac9 3335 size_t n = PAGE_SIZE;
c5fab6f4 3336
09cbfeaf 3337 if (len >= PAGE_SIZE) {
09cbfeaf 3338 len -= PAGE_SIZE;
8321fec4 3339 } else if (len > 0) {
8d5ce4d2 3340 /* enough for partial page, fill and zero the rest */
442c9ac9 3341 zero_user(page, len, PAGE_SIZE - len);
71335664 3342 n = rdata->tailsz = len;
8321fec4 3343 len = 0;
8d5ce4d2
JL
3344 } else if (page->index > eof_index) {
3345 /*
3346 * The VFS will not try to do readahead past the
3347 * i_size, but it's possible that we have outstanding
3348 * writes with gaps in the middle and the i_size hasn't
3349 * caught up yet. Populate those with zeroed out pages
3350 * to prevent the VFS from repeatedly attempting to
3351 * fill them until the writes are flushed.
3352 */
09cbfeaf 3353 zero_user(page, 0, PAGE_SIZE);
8d5ce4d2
JL
3354 lru_cache_add_file(page);
3355 flush_dcache_page(page);
3356 SetPageUptodate(page);
3357 unlock_page(page);
09cbfeaf 3358 put_page(page);
c5fab6f4
JL
3359 rdata->pages[i] = NULL;
3360 rdata->nr_pages--;
8321fec4 3361 continue;
8d5ce4d2
JL
3362 } else {
3363 /* no need to hold page hostage */
8d5ce4d2
JL
3364 lru_cache_add_file(page);
3365 unlock_page(page);
09cbfeaf 3366 put_page(page);
c5fab6f4
JL
3367 rdata->pages[i] = NULL;
3368 rdata->nr_pages--;
8321fec4 3369 continue;
8d5ce4d2 3370 }
8321fec4 3371
71335664 3372 result = cifs_read_page_from_socket(server, page, n);
8321fec4
JL
3373 if (result < 0)
3374 break;
3375
b3160aeb 3376 rdata->got_bytes += result;
8d5ce4d2
JL
3377 }
3378
b3160aeb
PS
3379 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3380 rdata->got_bytes : result;
8d5ce4d2
JL
3381}
3382
387eb92a
PS
3383static int
3384readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3385 unsigned int rsize, struct list_head *tmplist,
3386 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3387{
3388 struct page *page, *tpage;
3389 unsigned int expected_index;
3390 int rc;
8a5c743e 3391 gfp_t gfp = readahead_gfp_mask(mapping);
387eb92a 3392
69cebd75
PS
3393 INIT_LIST_HEAD(tmplist);
3394
387eb92a
PS
3395 page = list_entry(page_list->prev, struct page, lru);
3396
3397 /*
3398 * Lock the page and put it in the cache. Since no one else
3399 * should have access to this page, we're safe to simply set
3400 * PG_locked without checking it first.
3401 */
48c935ad 3402 __SetPageLocked(page);
387eb92a 3403 rc = add_to_page_cache_locked(page, mapping,
063d99b4 3404 page->index, gfp);
387eb92a
PS
3405
3406 /* give up if we can't stick it in the cache */
3407 if (rc) {
48c935ad 3408 __ClearPageLocked(page);
387eb92a
PS
3409 return rc;
3410 }
3411
3412 /* move first page to the tmplist */
09cbfeaf
KS
3413 *offset = (loff_t)page->index << PAGE_SHIFT;
3414 *bytes = PAGE_SIZE;
387eb92a
PS
3415 *nr_pages = 1;
3416 list_move_tail(&page->lru, tmplist);
3417
3418 /* now try and add more pages onto the request */
3419 expected_index = page->index + 1;
3420 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3421 /* discontinuity ? */
3422 if (page->index != expected_index)
3423 break;
3424
3425 /* would this page push the read over the rsize? */
09cbfeaf 3426 if (*bytes + PAGE_SIZE > rsize)
387eb92a
PS
3427 break;
3428
48c935ad 3429 __SetPageLocked(page);
063d99b4 3430 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
48c935ad 3431 __ClearPageLocked(page);
387eb92a
PS
3432 break;
3433 }
3434 list_move_tail(&page->lru, tmplist);
09cbfeaf 3435 (*bytes) += PAGE_SIZE;
387eb92a
PS
3436 expected_index++;
3437 (*nr_pages)++;
3438 }
3439 return rc;
8d5ce4d2
JL
3440}
3441
1da177e4
LT
3442static int cifs_readpages(struct file *file, struct address_space *mapping,
3443 struct list_head *page_list, unsigned num_pages)
3444{
690c5e31
JL
3445 int rc;
3446 struct list_head tmplist;
3447 struct cifsFileInfo *open_file = file->private_data;
7119e220 3448 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
69cebd75 3449 struct TCP_Server_Info *server;
690c5e31 3450 pid_t pid;
1da177e4 3451
56698236
SJ
3452 /*
3453 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3454 * immediately if the cookie is negative
54afa990
DH
3455 *
3456 * After this point, every page in the list might have PG_fscache set,
3457 * so we will need to clean that up off of every page we don't use.
56698236
SJ
3458 */
3459 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3460 &num_pages);
3461 if (rc == 0)
690c5e31 3462 return rc;
56698236 3463
d4ffff1f
PS
3464 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3465 pid = open_file->pid;
3466 else
3467 pid = current->tgid;
3468
690c5e31 3469 rc = 0;
69cebd75 3470 server = tlink_tcon(open_file->tlink)->ses->server;
1da177e4 3471
f96637be
JP
3472 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3473 __func__, file, mapping, num_pages);
690c5e31
JL
3474
3475 /*
3476 * Start with the page at end of list and move it to private
3477 * list. Do the same with any following pages until we hit
3478 * the rsize limit, hit an index discontinuity, or run out of
3479 * pages. Issue the async read and then start the loop again
3480 * until the list is empty.
3481 *
3482 * Note that list order is important. The page_list is in
3483 * the order of declining indexes. When we put the pages in
3484 * the rdata->pages, then we want them in increasing order.
3485 */
3486 while (!list_empty(page_list)) {
bed9da02 3487 unsigned int i, nr_pages, bytes, rsize;
690c5e31
JL
3488 loff_t offset;
3489 struct page *page, *tpage;
3490 struct cifs_readdata *rdata;
bed9da02 3491 unsigned credits;
1da177e4 3492
bed9da02
PS
3493 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3494 &rsize, &credits);
3495 if (rc)
3496 break;
690c5e31
JL
3497
3498 /*
69cebd75
PS
3499 * Give up immediately if rsize is too small to read an entire
3500 * page. The VFS will fall back to readpage. We should never
3501 * reach this point however since we set ra_pages to 0 when the
3502 * rsize is smaller than a cache page.
690c5e31 3503 */
09cbfeaf 3504 if (unlikely(rsize < PAGE_SIZE)) {
bed9da02 3505 add_credits_and_wake_if(server, credits, 0);
69cebd75 3506 return 0;
bed9da02 3507 }
690c5e31 3508
bed9da02
PS
3509 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3510 &nr_pages, &offset, &bytes);
690c5e31 3511 if (rc) {
bed9da02 3512 add_credits_and_wake_if(server, credits, 0);
690c5e31
JL
3513 break;
3514 }
3515
0471ca3f 3516 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
690c5e31
JL
3517 if (!rdata) {
3518 /* best to give up if we're out of mem */
3519 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3520 list_del(&page->lru);
3521 lru_cache_add_file(page);
3522 unlock_page(page);
09cbfeaf 3523 put_page(page);
690c5e31
JL
3524 }
3525 rc = -ENOMEM;
bed9da02 3526 add_credits_and_wake_if(server, credits, 0);
690c5e31
JL
3527 break;
3528 }
3529
6993f74a 3530 rdata->cfile = cifsFileInfo_get(open_file);
690c5e31
JL
3531 rdata->mapping = mapping;
3532 rdata->offset = offset;
3533 rdata->bytes = bytes;
3534 rdata->pid = pid;
09cbfeaf 3535 rdata->pagesz = PAGE_SIZE;
8321fec4 3536 rdata->read_into_pages = cifs_readpages_read_into_pages;
bed9da02 3537 rdata->credits = credits;
c5fab6f4
JL
3538
3539 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3540 list_del(&page->lru);
3541 rdata->pages[rdata->nr_pages++] = page;
3542 }
690c5e31 3543
69cebd75
PS
3544 if (!rdata->cfile->invalidHandle ||
3545 !cifs_reopen_file(rdata->cfile, true))
3546 rc = server->ops->async_readv(rdata);
3547 if (rc) {
bed9da02 3548 add_credits_and_wake_if(server, rdata->credits, 0);
c5fab6f4
JL
3549 for (i = 0; i < rdata->nr_pages; i++) {
3550 page = rdata->pages[i];
690c5e31
JL
3551 lru_cache_add_file(page);
3552 unlock_page(page);
09cbfeaf 3553 put_page(page);
1da177e4 3554 }
1209bbdf 3555 /* Fallback to the readpage in error/reconnect cases */
6993f74a 3556 kref_put(&rdata->refcount, cifs_readdata_release);
1da177e4
LT
3557 break;
3558 }
6993f74a
JL
3559
3560 kref_put(&rdata->refcount, cifs_readdata_release);
1da177e4
LT
3561 }
3562
54afa990
DH
3563 /* Any pages that have been shown to fscache but didn't get added to
3564 * the pagecache must be uncached before they get returned to the
3565 * allocator.
3566 */
3567 cifs_fscache_readpages_cancel(mapping->host, page_list);
1da177e4
LT
3568 return rc;
3569}
3570
a9e9b7bc
SP
3571/*
3572 * cifs_readpage_worker must be called with the page pinned
3573 */
1da177e4
LT
3574static int cifs_readpage_worker(struct file *file, struct page *page,
3575 loff_t *poffset)
3576{
3577 char *read_data;
3578 int rc;
3579
56698236 3580 /* Is the page cached? */
496ad9aa 3581 rc = cifs_readpage_from_fscache(file_inode(file), page);
56698236
SJ
3582 if (rc == 0)
3583 goto read_complete;
3584
1da177e4
LT
3585 read_data = kmap(page);
3586 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 3587
09cbfeaf 3588 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
fb8c4b14 3589
1da177e4
LT
3590 if (rc < 0)
3591 goto io_error;
3592 else
f96637be 3593 cifs_dbg(FYI, "Bytes read %d\n", rc);
fb8c4b14 3594
496ad9aa 3595 file_inode(file)->i_atime =
c2050a45 3596 current_time(file_inode(file));
fb8c4b14 3597
09cbfeaf
KS
3598 if (PAGE_SIZE > rc)
3599 memset(read_data + rc, 0, PAGE_SIZE - rc);
1da177e4
LT
3600
3601 flush_dcache_page(page);
3602 SetPageUptodate(page);
9dc06558
SJ
3603
3604 /* send this page to the cache */
496ad9aa 3605 cifs_readpage_to_fscache(file_inode(file), page);
9dc06558 3606
1da177e4 3607 rc = 0;
fb8c4b14 3608
1da177e4 3609io_error:
fb8c4b14 3610 kunmap(page);
466bd31b 3611 unlock_page(page);
56698236
SJ
3612
3613read_complete:
1da177e4
LT
3614 return rc;
3615}
3616
3617static int cifs_readpage(struct file *file, struct page *page)
3618{
09cbfeaf 3619 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
1da177e4 3620 int rc = -EACCES;
6d5786a3 3621 unsigned int xid;
1da177e4 3622
6d5786a3 3623 xid = get_xid();
1da177e4
LT
3624
3625 if (file->private_data == NULL) {
0f3bc09e 3626 rc = -EBADF;
6d5786a3 3627 free_xid(xid);
0f3bc09e 3628 return rc;
1da177e4
LT
3629 }
3630
f96637be 3631 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
b6b38f70 3632 page, (int)offset, (int)offset);
1da177e4
LT
3633
3634 rc = cifs_readpage_worker(file, page, &offset);
3635
6d5786a3 3636 free_xid(xid);
1da177e4
LT
3637 return rc;
3638}
3639
a403a0a3
SF
3640static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3641{
3642 struct cifsFileInfo *open_file;
3afca265
SF
3643 struct cifs_tcon *tcon =
3644 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
a403a0a3 3645
3afca265 3646 spin_lock(&tcon->open_file_lock);
a403a0a3 3647 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 3648 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3afca265 3649 spin_unlock(&tcon->open_file_lock);
a403a0a3
SF
3650 return 1;
3651 }
3652 }
3afca265 3653 spin_unlock(&tcon->open_file_lock);
a403a0a3
SF
3654 return 0;
3655}
3656
1da177e4
LT
3657/* We do not want to update the file size from server for inodes
3658 open for write - to avoid races with writepage extending
3659 the file - in the future we could consider allowing
fb8c4b14 3660 refreshing the inode only on increases in the file size
1da177e4
LT
3661 but this is tricky to do without racing with writebehind
3662 page caching in the current Linux kernel design */
4b18f2a9 3663bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 3664{
a403a0a3 3665 if (!cifsInode)
4b18f2a9 3666 return true;
50c2f753 3667
a403a0a3
SF
3668 if (is_inode_writable(cifsInode)) {
3669 /* This inode is open for write at least once */
c32a0b68
SF
3670 struct cifs_sb_info *cifs_sb;
3671
c32a0b68 3672 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 3673 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3674 /* since no page cache to corrupt on directio
c32a0b68 3675 we can change size safely */
4b18f2a9 3676 return true;
c32a0b68
SF
3677 }
3678
fb8c4b14 3679 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 3680 return true;
7ba52631 3681
4b18f2a9 3682 return false;
23e7dd7d 3683 } else
4b18f2a9 3684 return true;
1da177e4
LT
3685}
3686
d9414774
NP
3687static int cifs_write_begin(struct file *file, struct address_space *mapping,
3688 loff_t pos, unsigned len, unsigned flags,
3689 struct page **pagep, void **fsdata)
1da177e4 3690{
466bd31b 3691 int oncethru = 0;
09cbfeaf
KS
3692 pgoff_t index = pos >> PAGE_SHIFT;
3693 loff_t offset = pos & (PAGE_SIZE - 1);
a98ee8c1
JL
3694 loff_t page_start = pos & PAGE_MASK;
3695 loff_t i_size;
3696 struct page *page;
3697 int rc = 0;
d9414774 3698
f96637be 3699 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
d9414774 3700
466bd31b 3701start:
54566b2c 3702 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
3703 if (!page) {
3704 rc = -ENOMEM;
3705 goto out;
3706 }
8a236264 3707
a98ee8c1
JL
3708 if (PageUptodate(page))
3709 goto out;
8a236264 3710
a98ee8c1
JL
3711 /*
3712 * If we write a full page it will be up to date, no need to read from
3713 * the server. If the write is short, we'll end up doing a sync write
3714 * instead.
3715 */
09cbfeaf 3716 if (len == PAGE_SIZE)
a98ee8c1 3717 goto out;
8a236264 3718
a98ee8c1
JL
3719 /*
3720 * optimize away the read when we have an oplock, and we're not
3721 * expecting to use any of the data we'd be reading in. That
3722 * is, when the page lies beyond the EOF, or straddles the EOF
3723 * and the write will cover all of the existing data.
3724 */
18cceb6a 3725 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
a98ee8c1
JL
3726 i_size = i_size_read(mapping->host);
3727 if (page_start >= i_size ||
3728 (offset == 0 && (pos + len) >= i_size)) {
3729 zero_user_segments(page, 0, offset,
3730 offset + len,
09cbfeaf 3731 PAGE_SIZE);
a98ee8c1
JL
3732 /*
3733 * PageChecked means that the parts of the page
3734 * to which we're not writing are considered up
3735 * to date. Once the data is copied to the
3736 * page, it can be set uptodate.
3737 */
3738 SetPageChecked(page);
3739 goto out;
3740 }
3741 }
d9414774 3742
466bd31b 3743 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
a98ee8c1
JL
3744 /*
3745 * might as well read a page, it is fast enough. If we get
3746 * an error, we don't need to return it. cifs_write_end will
3747 * do a sync write instead since PG_uptodate isn't set.
3748 */
3749 cifs_readpage_worker(file, page, &page_start);
09cbfeaf 3750 put_page(page);
466bd31b
SP
3751 oncethru = 1;
3752 goto start;
8a236264
SF
3753 } else {
3754 /* we could try using another file handle if there is one -
3755 but how would we lock it to prevent close of that handle
3756 racing with this read? In any case
d9414774 3757 this will be written out by write_end so is fine */
1da177e4 3758 }
a98ee8c1
JL
3759out:
3760 *pagep = page;
3761 return rc;
1da177e4
LT
3762}
3763
85f2d6b4
SJ
3764static int cifs_release_page(struct page *page, gfp_t gfp)
3765{
3766 if (PagePrivate(page))
3767 return 0;
3768
3769 return cifs_fscache_release_page(page, gfp);
3770}
3771
d47992f8
LC
3772static void cifs_invalidate_page(struct page *page, unsigned int offset,
3773 unsigned int length)
85f2d6b4
SJ
3774{
3775 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3776
09cbfeaf 3777 if (offset == 0 && length == PAGE_SIZE)
85f2d6b4
SJ
3778 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3779}
3780
9ad1506b
PS
3781static int cifs_launder_page(struct page *page)
3782{
3783 int rc = 0;
3784 loff_t range_start = page_offset(page);
09cbfeaf 3785 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
9ad1506b
PS
3786 struct writeback_control wbc = {
3787 .sync_mode = WB_SYNC_ALL,
3788 .nr_to_write = 0,
3789 .range_start = range_start,
3790 .range_end = range_end,
3791 };
3792
f96637be 3793 cifs_dbg(FYI, "Launder page: %p\n", page);
9ad1506b
PS
3794
3795 if (clear_page_dirty_for_io(page))
3796 rc = cifs_writepage_locked(page, &wbc);
3797
3798 cifs_fscache_invalidate_page(page, page->mapping->host);
3799 return rc;
3800}
3801
/*
 * Work item run when the server revokes (breaks) an oplock/lease on an open
 * file.  The ordering below is significant: pending writers must drain
 * before the cached oplock level is downgraded, cached data must be flushed
 * (and possibly zapped) before locks are pushed and the break is
 * acknowledged to the server.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Wait for in-flight writers so the downgrade races with nobody. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	/* Protocol-specific downgrade, possibly keeping level II (read) caching. */
	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * Mandatory byte-range locks are incompatible with read caching
	 * without a write oplock, so drop caching entirely in that case.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
					cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	/*
	 * NOTE(review): inode was already dereferenced via CIFS_I(inode)
	 * above, so the NULL half of this check looks redundant — confirm.
	 */
	if (inode && S_ISREG(inode->i_mode)) {
		/* Propagate the break to local leaseholders (fcntl leases). */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		/* Losing read caching: wait for write-out and drop the pagecache. */
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* Re-send cached byte-range locks now that the oplock level changed. */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	/* Clear the pending-break flag and wake anyone waiting on it. */
	cifs_done_oplock_break(cinode);
}
3856
dca69288
SF
3857/*
3858 * The presence of cifs_direct_io() in the address space ops vector
3859 * allowes open() O_DIRECT flags which would have failed otherwise.
3860 *
3861 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
3862 * so this method should never be called.
3863 *
3864 * Direct IO is not yet supported in the cached mode.
3865 */
3866static ssize_t
c8b8e32d 3867cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
dca69288
SF
3868{
3869 /*
3870 * FIXME
3871 * Eventually need to support direct IO for non forcedirectio mounts
3872 */
3873 return -EINVAL;
3874}
3875
3876
/*
 * Address space operations used when the server negotiates a buffer large
 * enough for a full page of data (see cifs_addr_ops_smallbuf below for the
 * variant without readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations and fall
 * back to single-page reads via .readpage.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};