cifs: return proper error code in statfs(2)
[linux-block.git] / fs / cifs / transport.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/transport.c
3 *
ad7a2926 4 * Copyright (C) International Business Machines Corp., 2002,2008
1da177e4 5 * Author(s): Steve French (sfrench@us.ibm.com)
14a441a2 6 * Jeremy Allison (jra@samba.org) 2006.
79a58d1f 7 *
1da177e4
LT
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
79a58d1f 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
1da177e4
LT
21 */
22
23#include <linux/fs.h>
24#include <linux/list.h>
5a0e3ad6 25#include <linux/gfp.h>
1da177e4
LT
26#include <linux/wait.h>
27#include <linux/net.h>
28#include <linux/delay.h>
f06ac72e 29#include <linux/freezer.h>
b8eed283 30#include <linux/tcp.h>
2f8b5444 31#include <linux/bvec.h>
97bc00b3 32#include <linux/highmem.h>
7c0f6ba6 33#include <linux/uaccess.h>
1da177e4
LT
34#include <asm/processor.h>
35#include <linux/mempool.h>
14e25977 36#include <linux/sched/signal.h>
1da177e4
LT
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_debug.h"
8bd68c6e 41#include "smb2proto.h"
9762c2d0 42#include "smbdirect.h"
50c2f753 43
3cecf486
RS
44/* Max number of iovectors we can use off the stack when sending requests. */
45#define CIFS_MAX_IOV_SIZE 8
46
2dc7e1c0
PS
47void
48cifs_wake_up_task(struct mid_q_entry *mid)
2b84a36c
JL
49{
50 wake_up_process(mid->callback_data);
51}
52
a6827c18 53struct mid_q_entry *
24b9b06b 54AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
1da177e4
LT
55{
56 struct mid_q_entry *temp;
57
24b9b06b 58 if (server == NULL) {
f96637be 59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
1da177e4
LT
60 return NULL;
61 }
50c2f753 62
232087cb 63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
a6f74e80 64 memset(temp, 0, sizeof(struct mid_q_entry));
696e420b 65 kref_init(&temp->refcount);
a6f74e80
N
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
1047abc1 70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
a6f74e80
N
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
2b84a36c 74
a6f74e80
N
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
f1f27ad7
VW
79 get_task_struct(current);
80 temp->creator = current;
a6f74e80
N
81 temp->callback = cifs_wake_up_task;
82 temp->callback_data = current;
1da177e4 83
1da177e4 84 atomic_inc(&midCount);
7c9421e1 85 temp->mid_state = MID_REQUEST_ALLOCATED;
1da177e4
LT
86 return temp;
87}
88
696e420b
LP
89static void _cifs_mid_q_entry_release(struct kref *refcount)
90{
abe57073
PS
91 struct mid_q_entry *midEntry =
92 container_of(refcount, struct mid_q_entry, refcount);
1047abc1 93#ifdef CONFIG_CIFS_STATS2
2dc7e1c0 94 __le16 command = midEntry->server->vals->lock_cmd;
433b8dd7 95 __u16 smb_cmd = le16_to_cpu(midEntry->command);
1047abc1 96 unsigned long now;
433b8dd7 97 unsigned long roundtrip_time;
1047abc1 98#endif
7b71843f
PS
99 struct TCP_Server_Info *server = midEntry->server;
100
101 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
102 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
103 server->ops->handle_cancelled_mid)
104 server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
105
7c9421e1 106 midEntry->mid_state = MID_FREE;
8097531a 107 atomic_dec(&midCount);
7c9421e1 108 if (midEntry->large_buf)
b8643e1b
SF
109 cifs_buf_release(midEntry->resp_buf);
110 else
111 cifs_small_buf_release(midEntry->resp_buf);
1047abc1
SF
112#ifdef CONFIG_CIFS_STATS2
113 now = jiffies;
433b8dd7 114 if (now < midEntry->when_alloc)
a0a3036b 115 cifs_server_dbg(VFS, "Invalid mid allocation time\n");
433b8dd7
SF
116 roundtrip_time = now - midEntry->when_alloc;
117
118 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
119 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
120 server->slowest_cmd[smb_cmd] = roundtrip_time;
121 server->fastest_cmd[smb_cmd] = roundtrip_time;
122 } else {
123 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
124 server->slowest_cmd[smb_cmd] = roundtrip_time;
125 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
126 server->fastest_cmd[smb_cmd] = roundtrip_time;
127 }
128 cifs_stats_inc(&server->num_cmds[smb_cmd]);
129 server->time_per_cmd[smb_cmd] += roundtrip_time;
130 }
00778e22
SF
131 /*
132 * commands taking longer than one second (default) can be indications
133 * that something is wrong, unless it is quite a slow link or a very
134 * busy server. Note that this calc is unlikely or impossible to wrap
135 * as long as slow_rsp_threshold is not set way above recommended max
136 * value (32767 ie 9 hours) and is generally harmless even if wrong
137 * since only affects debug counters - so leaving the calc as simple
138 * comparison rather than doing multiple conversions and overflow
139 * checks
140 */
141 if ((slow_rsp_threshold != 0) &&
142 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
020eec5f 143 (midEntry->command != command)) {
f5942db5
SF
144 /*
145 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
146 * NB: le16_to_cpu returns unsigned so can not be negative below
147 */
433b8dd7
SF
148 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
149 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
468d6779 150
433b8dd7 151 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
020eec5f
SF
152 midEntry->when_sent, midEntry->when_received);
153 if (cifsFYI & CIFS_TIMER) {
a0a3036b
JP
154 pr_debug("slow rsp: cmd %d mid %llu",
155 midEntry->command, midEntry->mid);
156 cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
157 now - midEntry->when_alloc,
158 now - midEntry->when_sent,
159 now - midEntry->when_received);
1047abc1
SF
160 }
161 }
162#endif
f1f27ad7 163 put_task_struct(midEntry->creator);
abe57073
PS
164
165 mempool_free(midEntry, cifs_mid_poolp);
166}
167
168void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
169{
170 spin_lock(&GlobalMid_Lock);
171 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
172 spin_unlock(&GlobalMid_Lock);
173}
174
/* Legacy-named wrapper: release the caller's reference on @midEntry. */
void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}
179
3c1bf7e4
PS
180void
181cifs_delete_mid(struct mid_q_entry *mid)
ddc8cf8f
JL
182{
183 spin_lock(&GlobalMid_Lock);
abe57073
PS
184 if (!(mid->mid_flags & MID_DELETED)) {
185 list_del_init(&mid->qhead);
186 mid->mid_flags |= MID_DELETED;
187 }
ddc8cf8f
JL
188 spin_unlock(&GlobalMid_Lock);
189
190 DeleteMidQEntry(mid);
191}
192
6f49f46b
JL
193/*
194 * smb_send_kvec - send an array of kvecs to the server
195 * @server: Server to send the data to
3ab3f2a1 196 * @smb_msg: Message to send
6f49f46b
JL
197 * @sent: amount of data sent on socket is stored here
198 *
199 * Our basic "send data to server" function. Should be called with srv_mutex
200 * held. The caller is responsible for handling the results.
201 */
d6e04ae6 202static int
3ab3f2a1
AV
203smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
204 size_t *sent)
1da177e4
LT
205{
206 int rc = 0;
3ab3f2a1 207 int retries = 0;
edf1ae40 208 struct socket *ssocket = server->ssocket;
50c2f753 209
6f49f46b
JL
210 *sent = 0;
211
3ab3f2a1
AV
212 smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
213 smb_msg->msg_namelen = sizeof(struct sockaddr);
214 smb_msg->msg_control = NULL;
215 smb_msg->msg_controllen = 0;
0496e02d 216 if (server->noblocksnd)
3ab3f2a1 217 smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
edf1ae40 218 else
3ab3f2a1 219 smb_msg->msg_flags = MSG_NOSIGNAL;
1da177e4 220
3ab3f2a1 221 while (msg_data_left(smb_msg)) {
6f49f46b
JL
222 /*
223 * If blocking send, we try 3 times, since each can block
224 * for 5 seconds. For nonblocking we have to try more
225 * but wait increasing amounts of time allowing time for
226 * socket to clear. The overall time we wait in either
227 * case to send on the socket is about 15 seconds.
228 * Similarly we wait for 15 seconds for a response from
229 * the server in SendReceive[2] for the server to send
230 * a response back for most types of requests (except
231 * SMB Write past end of file which can be slow, and
232 * blocking lock operations). NFS waits slightly longer
233 * than CIFS, but this can make it take longer for
234 * nonresponsive servers to be detected and 15 seconds
235 * is more than enough time for modern networks to
236 * send a packet. In most cases if we fail to send
237 * after the retries we will kill the socket and
238 * reconnect which may clear the network problem.
239 */
3ab3f2a1 240 rc = sock_sendmsg(ssocket, smb_msg);
ce6c44e4 241 if (rc == -EAGAIN) {
3ab3f2a1
AV
242 retries++;
243 if (retries >= 14 ||
244 (!server->noblocksnd && (retries > 2))) {
afe6f653 245 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
f96637be 246 ssocket);
3ab3f2a1 247 return -EAGAIN;
1da177e4 248 }
3ab3f2a1 249 msleep(1 << retries);
1da177e4
LT
250 continue;
251 }
6f49f46b 252
79a58d1f 253 if (rc < 0)
3ab3f2a1 254 return rc;
6f49f46b 255
79a58d1f 256 if (rc == 0) {
3e84469d
SF
257 /* should never happen, letting socket clear before
258 retrying is our only obvious option here */
afe6f653 259 cifs_server_dbg(VFS, "tcp sent no data\n");
3e84469d
SF
260 msleep(500);
261 continue;
d6e04ae6 262 }
6f49f46b 263
3ab3f2a1
AV
264 /* send was at least partially successful */
265 *sent += rc;
266 retries = 0; /* in case we get ENOSPC on the next send */
1da177e4 267 }
3ab3f2a1 268 return 0;
97bc00b3
JL
269}
270
35e2cc1b 271unsigned long
81f39f95 272smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
a26054d1
JL
273{
274 unsigned int i;
35e2cc1b
PA
275 struct kvec *iov;
276 int nvec;
a26054d1
JL
277 unsigned long buflen = 0;
278
81f39f95
RS
279 if (server->vals->header_preamble_size == 0 &&
280 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
35e2cc1b
PA
281 iov = &rqst->rq_iov[1];
282 nvec = rqst->rq_nvec - 1;
283 } else {
284 iov = rqst->rq_iov;
285 nvec = rqst->rq_nvec;
286 }
287
a26054d1 288 /* total up iov array first */
35e2cc1b 289 for (i = 0; i < nvec; i++)
a26054d1
JL
290 buflen += iov[i].iov_len;
291
c06a0f2d
LL
292 /*
293 * Add in the page array if there is one. The caller needs to make
294 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
295 * multiple pages ends at page boundary, rq_tailsz needs to be set to
296 * PAGE_SIZE.
297 */
a26054d1 298 if (rqst->rq_npages) {
c06a0f2d
LL
299 if (rqst->rq_npages == 1)
300 buflen += rqst->rq_tailsz;
301 else {
302 /*
303 * If there is more than one page, calculate the
304 * buffer length based on rq_offset and rq_tailsz
305 */
306 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
307 rqst->rq_offset;
308 buflen += rqst->rq_tailsz;
309 }
a26054d1
JL
310 }
311
312 return buflen;
313}
314
6f49f46b 315static int
07cd952f
RS
316__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
317 struct smb_rqst *rqst)
6f49f46b 318{
07cd952f
RS
319 int rc = 0;
320 struct kvec *iov;
321 int n_vec;
322 unsigned int send_length = 0;
323 unsigned int i, j;
b30c74c7 324 sigset_t mask, oldmask;
3ab3f2a1 325 size_t total_len = 0, sent, size;
b8eed283 326 struct socket *ssocket = server->ssocket;
3ab3f2a1 327 struct msghdr smb_msg;
c713c877
RS
328 __be32 rfc1002_marker;
329
4357d45f
LL
330 if (cifs_rdma_enabled(server)) {
331 /* return -EAGAIN when connecting or reconnecting */
332 rc = -EAGAIN;
333 if (server->smbd_conn)
334 rc = smbd_send(server, num_rqst, rqst);
9762c2d0
LL
335 goto smbd_done;
336 }
afc18a6f 337
ea702b80 338 if (ssocket == NULL)
afc18a6f 339 return -EAGAIN;
ea702b80 340
214a5ea0 341 if (fatal_signal_pending(current)) {
6988a619
PA
342 cifs_dbg(FYI, "signal pending before send request\n");
343 return -ERESTARTSYS;
b30c74c7
PS
344 }
345
b8eed283 346 /* cork the socket */
db10538a 347 tcp_sock_set_cork(ssocket->sk, true);
b8eed283 348
07cd952f 349 for (j = 0; j < num_rqst; j++)
81f39f95 350 send_length += smb_rqst_len(server, &rqst[j]);
07cd952f
RS
351 rfc1002_marker = cpu_to_be32(send_length);
352
b30c74c7
PS
353 /*
354 * We should not allow signals to interrupt the network send because
355 * any partial send will cause session reconnects thus increasing
356 * latency of system calls and overload a server with unnecessary
357 * requests.
358 */
359
360 sigfillset(&mask);
361 sigprocmask(SIG_BLOCK, &mask, &oldmask);
362
c713c877
RS
363 /* Generate a rfc1002 marker for SMB2+ */
364 if (server->vals->header_preamble_size == 0) {
365 struct kvec hiov = {
366 .iov_base = &rfc1002_marker,
367 .iov_len = 4
368 };
aa563d7b 369 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
c713c877
RS
370 rc = smb_send_kvec(server, &smb_msg, &sent);
371 if (rc < 0)
b30c74c7 372 goto unmask;
c713c877
RS
373
374 total_len += sent;
375 send_length += 4;
376 }
377
662bf5bc
PA
378 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
379
07cd952f
RS
380 for (j = 0; j < num_rqst; j++) {
381 iov = rqst[j].rq_iov;
382 n_vec = rqst[j].rq_nvec;
3ab3f2a1 383
07cd952f 384 size = 0;
662bf5bc
PA
385 for (i = 0; i < n_vec; i++) {
386 dump_smb(iov[i].iov_base, iov[i].iov_len);
07cd952f 387 size += iov[i].iov_len;
662bf5bc 388 }
97bc00b3 389
aa563d7b 390 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
97bc00b3 391
3ab3f2a1 392 rc = smb_send_kvec(server, &smb_msg, &sent);
97bc00b3 393 if (rc < 0)
b30c74c7 394 goto unmask;
97bc00b3
JL
395
396 total_len += sent;
07cd952f
RS
397
398 /* now walk the page array and send each page in it */
399 for (i = 0; i < rqst[j].rq_npages; i++) {
400 struct bio_vec bvec;
401
402 bvec.bv_page = rqst[j].rq_pages[i];
403 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
404 &bvec.bv_offset);
405
aa563d7b 406 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
07cd952f
RS
407 &bvec, 1, bvec.bv_len);
408 rc = smb_send_kvec(server, &smb_msg, &sent);
409 if (rc < 0)
410 break;
411
412 total_len += sent;
413 }
97bc00b3 414 }
1da177e4 415
b30c74c7
PS
416unmask:
417 sigprocmask(SIG_SETMASK, &oldmask, NULL);
418
419 /*
420 * If signal is pending but we have already sent the whole packet to
421 * the server we need to return success status to allow a corresponding
422 * mid entry to be kept in the pending requests queue thus allowing
423 * to handle responses from the server by the client.
424 *
425 * If only part of the packet has been sent there is no need to hide
426 * interrupt because the session will be reconnected anyway, so there
427 * won't be any response from the server to handle.
428 */
429
430 if (signal_pending(current) && (total_len != send_length)) {
431 cifs_dbg(FYI, "signal is pending after attempt to send\n");
214a5ea0 432 rc = -ERESTARTSYS;
b30c74c7
PS
433 }
434
b8eed283 435 /* uncork it */
db10538a 436 tcp_sock_set_cork(ssocket->sk, false);
b8eed283 437
c713c877 438 if ((total_len > 0) && (total_len != send_length)) {
f96637be 439 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
c713c877 440 send_length, total_len);
6f49f46b
JL
441 /*
442 * If we have only sent part of an SMB then the next SMB could
443 * be taken as the remainder of this one. We need to kill the
444 * socket so the server throws away the partial SMB
445 */
edf1ae40 446 server->tcpStatus = CifsNeedReconnect;
bf1fdeb7 447 trace_smb3_partial_send_reconnect(server->CurrentMid,
6d82c27a 448 server->conn_id, server->hostname);
edf1ae40 449 }
9762c2d0 450smbd_done:
d804d41d 451 if (rc < 0 && rc != -EINTR)
afe6f653 452 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
f96637be 453 rc);
ee13919c 454 else if (rc > 0)
1da177e4 455 rc = 0;
1da177e4
LT
456
457 return rc;
458}
459
6f49f46b 460static int
1f3a8f5f
RS
461smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
462 struct smb_rqst *rqst, int flags)
6f49f46b 463{
b2c96de7 464 struct kvec iov;
3946d0d0 465 struct smb2_transform_hdr *tr_hdr;
b2c96de7 466 struct smb_rqst cur_rqst[MAX_COMPOUND];
7fb8986e
PS
467 int rc;
468
469 if (!(flags & CIFS_TRANSFORM_REQ))
1f3a8f5f
RS
470 return __smb_send_rqst(server, num_rqst, rqst);
471
472 if (num_rqst > MAX_COMPOUND - 1)
473 return -ENOMEM;
7fb8986e 474
b2c96de7 475 if (!server->ops->init_transform_rq) {
a0a3036b 476 cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
7fb8986e
PS
477 return -EIO;
478 }
6f49f46b 479
3946d0d0
LL
480 tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
481 if (!tr_hdr)
482 return -ENOMEM;
483
484 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
485 memset(&iov, 0, sizeof(iov));
486 memset(tr_hdr, 0, sizeof(*tr_hdr));
487
488 iov.iov_base = tr_hdr;
489 iov.iov_len = sizeof(*tr_hdr);
490 cur_rqst[0].rq_iov = &iov;
491 cur_rqst[0].rq_nvec = 1;
492
1f3a8f5f
RS
493 rc = server->ops->init_transform_rq(server, num_rqst + 1,
494 &cur_rqst[0], rqst);
7fb8986e 495 if (rc)
3946d0d0 496 goto out;
7fb8986e 497
1f3a8f5f
RS
498 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
499 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
3946d0d0
LL
500out:
501 kfree(tr_hdr);
7fb8986e 502 return rc;
6f49f46b
JL
503}
504
0496e02d
JL
505int
506smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
507 unsigned int smb_buf_length)
508{
738f9de5 509 struct kvec iov[2];
7fb8986e
PS
510 struct smb_rqst rqst = { .rq_iov = iov,
511 .rq_nvec = 2 };
0496e02d 512
738f9de5
PS
513 iov[0].iov_base = smb_buffer;
514 iov[0].iov_len = 4;
515 iov[1].iov_base = (char *)smb_buffer + 4;
516 iov[1].iov_len = smb_buf_length;
0496e02d 517
07cd952f 518 return __smb_send_rqst(server, 1, &rqst);
0496e02d
JL
519}
520
fc40f9cf 521static int
b227d215 522wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
2b53b929
RS
523 const int timeout, const int flags,
524 unsigned int *instance)
1da177e4 525{
19e88867 526 long rc;
4230cff8
RS
527 int *credits;
528 int optype;
2b53b929 529 long int t;
6d82c27a 530 int scredits, in_flight;
2b53b929
RS
531
532 if (timeout < 0)
533 t = MAX_JIFFY_OFFSET;
534 else
535 t = msecs_to_jiffies(timeout);
4230cff8
RS
536
537 optype = flags & CIFS_OP_MASK;
5bc59498 538
34f4deb7
PS
539 *instance = 0;
540
4230cff8
RS
541 credits = server->ops->get_credits_field(server, optype);
542 /* Since an echo is already inflight, no need to wait to send another */
543 if (*credits <= 0 && optype == CIFS_ECHO_OP)
544 return -EAGAIN;
545
fc40f9cf 546 spin_lock(&server->req_lock);
392e1c5d 547 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
1da177e4 548 /* oplock breaks must not be held up */
fc40f9cf 549 server->in_flight++;
1b63f184
SF
550 if (server->in_flight > server->max_in_flight)
551 server->max_in_flight = server->in_flight;
bc205ed1 552 *credits -= 1;
34f4deb7 553 *instance = server->reconnect_instance;
6d82c27a
SP
554 scredits = *credits;
555 in_flight = server->in_flight;
fc40f9cf 556 spin_unlock(&server->req_lock);
6d82c27a
SP
557
558 trace_smb3_add_credits(server->CurrentMid,
559 server->conn_id, server->hostname, scredits, -1, in_flight);
560 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
561 __func__, 1, scredits);
562
27a97a61
VL
563 return 0;
564 }
565
27a97a61 566 while (1) {
b227d215 567 if (*credits < num_credits) {
6d82c27a 568 scredits = *credits;
fc40f9cf 569 spin_unlock(&server->req_lock);
6d82c27a 570
789e6661 571 cifs_num_waiters_inc(server);
2b53b929
RS
572 rc = wait_event_killable_timeout(server->request_q,
573 has_credits(server, credits, num_credits), t);
789e6661 574 cifs_num_waiters_dec(server);
2b53b929 575 if (!rc) {
6d82c27a
SP
576 spin_lock(&server->req_lock);
577 scredits = *credits;
578 in_flight = server->in_flight;
579 spin_unlock(&server->req_lock);
580
7937ca96 581 trace_smb3_credit_timeout(server->CurrentMid,
6d82c27a
SP
582 server->conn_id, server->hostname, scredits,
583 num_credits, in_flight);
afe6f653 584 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 585 timeout);
7de03948 586 return -EBUSY;
2b53b929
RS
587 }
588 if (rc == -ERESTARTSYS)
589 return -ERESTARTSYS;
fc40f9cf 590 spin_lock(&server->req_lock);
27a97a61 591 } else {
c5797a94 592 if (server->tcpStatus == CifsExiting) {
fc40f9cf 593 spin_unlock(&server->req_lock);
27a97a61 594 return -ENOENT;
1da177e4 595 }
27a97a61 596
16b34aa4
RS
597 /*
598 * For normal commands, reserve the last MAX_COMPOUND
599 * credits to compound requests.
600 * Otherwise these compounds could be permanently
601 * starved for credits by single-credit requests.
602 *
603 * To prevent spinning CPU, block this thread until
604 * there are >MAX_COMPOUND credits available.
605 * But only do this is we already have a lot of
606 * credits in flight to avoid triggering this check
607 * for servers that are slow to hand out credits on
608 * new sessions.
609 */
610 if (!optype && num_credits == 1 &&
611 server->in_flight > 2 * MAX_COMPOUND &&
612 *credits <= MAX_COMPOUND) {
613 spin_unlock(&server->req_lock);
6d82c27a 614
16b34aa4 615 cifs_num_waiters_inc(server);
2b53b929
RS
616 rc = wait_event_killable_timeout(
617 server->request_q,
16b34aa4 618 has_credits(server, credits,
2b53b929
RS
619 MAX_COMPOUND + 1),
620 t);
16b34aa4 621 cifs_num_waiters_dec(server);
2b53b929 622 if (!rc) {
6d82c27a
SP
623 spin_lock(&server->req_lock);
624 scredits = *credits;
625 in_flight = server->in_flight;
626 spin_unlock(&server->req_lock);
627
7937ca96 628 trace_smb3_credit_timeout(
6d82c27a
SP
629 server->CurrentMid,
630 server->conn_id, server->hostname,
631 scredits, num_credits, in_flight);
afe6f653 632 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
6d82c27a 633 timeout);
7de03948 634 return -EBUSY;
2b53b929
RS
635 }
636 if (rc == -ERESTARTSYS)
637 return -ERESTARTSYS;
16b34aa4
RS
638 spin_lock(&server->req_lock);
639 continue;
640 }
641
2d86dbc9
PS
642 /*
643 * Can not count locking commands against total
644 * as they are allowed to block on server.
645 */
27a97a61
VL
646
647 /* update # of requests on the wire to server */
4230cff8 648 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
b227d215
RS
649 *credits -= num_credits;
650 server->in_flight += num_credits;
1b63f184
SF
651 if (server->in_flight > server->max_in_flight)
652 server->max_in_flight = server->in_flight;
34f4deb7 653 *instance = server->reconnect_instance;
2d86dbc9 654 }
6d82c27a
SP
655 scredits = *credits;
656 in_flight = server->in_flight;
fc40f9cf 657 spin_unlock(&server->req_lock);
cd7b699b
SP
658
659 trace_smb3_add_credits(server->CurrentMid,
6d82c27a
SP
660 server->conn_id, server->hostname, scredits,
661 -(num_credits), in_flight);
cd7b699b
SP
662 cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
663 __func__, num_credits, scredits);
27a97a61 664 break;
1da177e4
LT
665 }
666 }
7ee1af76
JA
667 return 0;
668}
1da177e4 669
bc205ed1 670static int
480b1cb9
RS
671wait_for_free_request(struct TCP_Server_Info *server, const int flags,
672 unsigned int *instance)
bc205ed1 673{
2b53b929
RS
674 return wait_for_free_credits(server, 1, -1, flags,
675 instance);
bc205ed1
PS
676}
677
257b7809
RS
678static int
679wait_for_compound_request(struct TCP_Server_Info *server, int num,
680 const int flags, unsigned int *instance)
681{
682 int *credits;
6d82c27a 683 int scredits, in_flight;
257b7809
RS
684
685 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
686
687 spin_lock(&server->req_lock);
cd7b699b 688 scredits = *credits;
6d82c27a 689 in_flight = server->in_flight;
cd7b699b 690
257b7809
RS
691 if (*credits < num) {
692 /*
91792bb8
PS
693 * If the server is tight on resources or just gives us less
694 * credits for other reasons (e.g. requests are coming out of
695 * order and the server delays granting more credits until it
696 * processes a missing mid) and we exhausted most available
697 * credits there may be situations when we try to send
698 * a compound request but we don't have enough credits. At this
699 * point the client needs to decide if it should wait for
700 * additional credits or fail the request. If at least one
701 * request is in flight there is a high probability that the
702 * server will return enough credits to satisfy this compound
703 * request.
704 *
705 * Return immediately if no requests in flight since we will be
706 * stuck on waiting for credits.
257b7809 707 */
91792bb8 708 if (server->in_flight == 0) {
257b7809 709 spin_unlock(&server->req_lock);
cd7b699b 710 trace_smb3_insufficient_credits(server->CurrentMid,
6d82c27a
SP
711 server->conn_id, server->hostname, scredits,
712 num, in_flight);
cd7b699b 713 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
6d82c27a 714 __func__, in_flight, num, scredits);
7de03948 715 return -EDEADLK;
257b7809
RS
716 }
717 }
718 spin_unlock(&server->req_lock);
719
720 return wait_for_free_credits(server, num, 60000, flags,
721 instance);
722}
723
cb7e9eab
PS
724int
725cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
335b7b62 726 unsigned int *num, struct cifs_credits *credits)
cb7e9eab
PS
727{
728 *num = size;
335b7b62
PS
729 credits->value = 0;
730 credits->instance = server->reconnect_instance;
cb7e9eab
PS
731 return 0;
732}
733
96daf2b0 734static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
7ee1af76
JA
735 struct mid_q_entry **ppmidQ)
736{
1da177e4 737 if (ses->server->tcpStatus == CifsExiting) {
7ee1af76 738 return -ENOENT;
8fbbd365
VL
739 }
740
741 if (ses->server->tcpStatus == CifsNeedReconnect) {
f96637be 742 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
7ee1af76 743 return -EAGAIN;
8fbbd365
VL
744 }
745
7f48558e 746 if (ses->status == CifsNew) {
79a58d1f 747 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
ad7a2926 748 (in_buf->Command != SMB_COM_NEGOTIATE))
7ee1af76 749 return -EAGAIN;
ad7a2926 750 /* else ok - we are setting up session */
1da177e4 751 }
7f48558e
SP
752
753 if (ses->status == CifsExiting) {
754 /* check if SMB session is bad because we are setting it up */
755 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
756 return -EAGAIN;
757 /* else ok - we are shutting down session */
758 }
759
24b9b06b 760 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
26f57364 761 if (*ppmidQ == NULL)
7ee1af76 762 return -ENOMEM;
ddc8cf8f
JL
763 spin_lock(&GlobalMid_Lock);
764 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
765 spin_unlock(&GlobalMid_Lock);
7ee1af76
JA
766 return 0;
767}
768
0ade640e
JL
769static int
770wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
7ee1af76 771{
0ade640e 772 int error;
7ee1af76 773
5853cc2a 774 error = wait_event_freezekillable_unsafe(server->response_q,
7c9421e1 775 midQ->mid_state != MID_REQUEST_SUBMITTED);
0ade640e
JL
776 if (error < 0)
777 return -ERESTARTSYS;
7ee1af76 778
0ade640e 779 return 0;
7ee1af76
JA
780}
781
fec344e3
JL
782struct mid_q_entry *
783cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
792af7b0
PS
784{
785 int rc;
fec344e3 786 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
787 struct mid_q_entry *mid;
788
738f9de5
PS
789 if (rqst->rq_iov[0].iov_len != 4 ||
790 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
791 return ERR_PTR(-EIO);
792
792af7b0 793 /* enable signing if server requires it */
38d77c50 794 if (server->sign)
792af7b0
PS
795 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
796
797 mid = AllocMidQEntry(hdr, server);
798 if (mid == NULL)
fec344e3 799 return ERR_PTR(-ENOMEM);
792af7b0 800
fec344e3 801 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
ffc61ccb
SP
802 if (rc) {
803 DeleteMidQEntry(mid);
fec344e3 804 return ERR_PTR(rc);
ffc61ccb
SP
805 }
806
fec344e3 807 return mid;
792af7b0 808}
133672ef 809
a6827c18
JL
810/*
811 * Send a SMB request and set the callback function in the mid to handle
812 * the result. Caller is responsible for dealing with timeouts.
813 */
814int
fec344e3 815cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
9b7c18a2 816 mid_receive_t *receive, mid_callback_t *callback,
3349c3a7
PS
817 mid_handle_t *handle, void *cbdata, const int flags,
818 const struct cifs_credits *exist_credits)
a6827c18 819{
480b1cb9 820 int rc;
a6827c18 821 struct mid_q_entry *mid;
335b7b62 822 struct cifs_credits credits = { .value = 0, .instance = 0 };
34f4deb7 823 unsigned int instance;
480b1cb9 824 int optype;
a6827c18 825
a891f0f8
PS
826 optype = flags & CIFS_OP_MASK;
827
cb7e9eab 828 if ((flags & CIFS_HAS_CREDITS) == 0) {
480b1cb9 829 rc = wait_for_free_request(server, flags, &instance);
cb7e9eab
PS
830 if (rc)
831 return rc;
335b7b62 832 credits.value = 1;
34f4deb7 833 credits.instance = instance;
3349c3a7
PS
834 } else
835 instance = exist_credits->instance;
a6827c18
JL
836
837 mutex_lock(&server->srv_mutex);
3349c3a7
PS
838
839 /*
840 * We can't use credits obtained from the previous session to send this
841 * request. Check if there were reconnects after we obtained credits and
842 * return -EAGAIN in such cases to let callers handle it.
843 */
844 if (instance != server->reconnect_instance) {
845 mutex_unlock(&server->srv_mutex);
846 add_credits_and_wake_if(server, &credits, optype);
847 return -EAGAIN;
848 }
849
fec344e3
JL
850 mid = server->ops->setup_async_request(server, rqst);
851 if (IS_ERR(mid)) {
a6827c18 852 mutex_unlock(&server->srv_mutex);
335b7b62 853 add_credits_and_wake_if(server, &credits, optype);
fec344e3 854 return PTR_ERR(mid);
a6827c18
JL
855 }
856
44d22d84 857 mid->receive = receive;
a6827c18
JL
858 mid->callback = callback;
859 mid->callback_data = cbdata;
9b7c18a2 860 mid->handle = handle;
7c9421e1 861 mid->mid_state = MID_REQUEST_SUBMITTED;
789e6661 862
ffc61ccb
SP
863 /* put it on the pending_mid_q */
864 spin_lock(&GlobalMid_Lock);
865 list_add_tail(&mid->qhead, &server->pending_mid_q);
866 spin_unlock(&GlobalMid_Lock);
867
93d2cb6c
LL
868 /*
869 * Need to store the time in mid before calling I/O. For call_async,
870 * I/O response may come back and free the mid entry on another thread.
871 */
872 cifs_save_when_sent(mid);
789e6661 873 cifs_in_send_inc(server);
1f3a8f5f 874 rc = smb_send_rqst(server, 1, rqst, flags);
789e6661 875 cifs_in_send_dec(server);
ad313cb8 876
820962dc 877 if (rc < 0) {
c781af7e 878 revert_current_mid(server, mid->credits);
ad313cb8 879 server->sequence_number -= 2;
820962dc
RV
880 cifs_delete_mid(mid);
881 }
882
a6827c18 883 mutex_unlock(&server->srv_mutex);
789e6661 884
ffc61ccb
SP
885 if (rc == 0)
886 return 0;
a6827c18 887
335b7b62 888 add_credits_and_wake_if(server, &credits, optype);
a6827c18
JL
889 return rc;
890}
891
133672ef
SF
892/*
893 *
894 * Send an SMB Request. No response info (other than return code)
895 * needs to be parsed.
896 *
897 * flags indicate the type of request buffer and how long to wait
898 * and whether to log NT STATUS code (error) before mapping it to POSIX error
899 *
900 */
901int
96daf2b0 902SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
792af7b0 903 char *in_buf, int flags)
133672ef
SF
904{
905 int rc;
906 struct kvec iov[1];
da502f7d 907 struct kvec rsp_iov;
133672ef
SF
908 int resp_buf_type;
909
792af7b0
PS
910 iov[0].iov_base = in_buf;
911 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
392e1c5d 912 flags |= CIFS_NO_RSP_BUF;
da502f7d 913 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
f96637be 914 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
90c81e0b 915
133672ef
SF
916 return rc;
917}
918
/*
 * Reap the outcome of a synchronously-waited request: translate the mid
 * state into an errno, dequeue the mid under GlobalMid_Lock if the
 * demultiplex thread has not already removed it, and free the mid.
 * On MID_RESPONSE_RECEIVED the mid is NOT freed here, so the caller can
 * still consume mid->resp_buf and must delete the mid itself.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		/* success: leave the mid alive for the caller to consume */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		/* unexpected state: take the mid off pending_mid_q ourselves
		   (unless already done) before freeing it below */
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
955
121b046a 956static inline int
fb2036d8
PS
957send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
958 struct mid_q_entry *mid)
76dcc26f 959{
121b046a 960 return server->ops->send_cancel ?
fb2036d8 961 server->ops->send_cancel(server, rqst, mid) : 0;
76dcc26f
JL
962}
963
2c8f981d
JL
964int
965cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
966 bool log_error)
967{
792af7b0 968 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
826a95e4
JL
969
970 dump_smb(mid->resp_buf, min_t(u32, 92, len));
2c8f981d
JL
971
972 /* convert the length into a more usable form */
38d77c50 973 if (server->sign) {
738f9de5 974 struct kvec iov[2];
985e4ff0 975 int rc = 0;
738f9de5
PS
976 struct smb_rqst rqst = { .rq_iov = iov,
977 .rq_nvec = 2 };
826a95e4 978
738f9de5
PS
979 iov[0].iov_base = mid->resp_buf;
980 iov[0].iov_len = 4;
981 iov[1].iov_base = (char *)mid->resp_buf + 4;
982 iov[1].iov_len = len - 4;
2c8f981d 983 /* FIXME: add code to kill session */
bf5ea0e2 984 rc = cifs_verify_signature(&rqst, server,
0124cc45 985 mid->sequence_number);
985e4ff0 986 if (rc)
afe6f653 987 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
f96637be 988 rc);
2c8f981d
JL
989 }
990
991 /* BB special case reconnect tid and uid here? */
a3713ec3 992 return map_and_check_smb_error(mid, log_error);
2c8f981d
JL
993}
994
fec344e3 995struct mid_q_entry *
f780bd3f
AA
996cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
997 struct smb_rqst *rqst)
792af7b0
PS
998{
999 int rc;
fec344e3 1000 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
792af7b0
PS
1001 struct mid_q_entry *mid;
1002
738f9de5
PS
1003 if (rqst->rq_iov[0].iov_len != 4 ||
1004 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
1005 return ERR_PTR(-EIO);
1006
792af7b0
PS
1007 rc = allocate_mid(ses, hdr, &mid);
1008 if (rc)
fec344e3
JL
1009 return ERR_PTR(rc);
1010 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
1011 if (rc) {
3c1bf7e4 1012 cifs_delete_mid(mid);
fec344e3
JL
1013 return ERR_PTR(rc);
1014 }
1015 return mid;
792af7b0
PS
1016}
1017
4e34feb5 1018static void
ee258d79 1019cifs_compound_callback(struct mid_q_entry *mid)
8a26f0f7
PS
1020{
1021 struct TCP_Server_Info *server = mid->server;
34f4deb7
PS
1022 struct cifs_credits credits;
1023
1024 credits.value = server->ops->get_credits(mid);
1025 credits.instance = server->reconnect_instance;
8a26f0f7 1026
34f4deb7 1027 add_credits(server, &credits, mid->optype);
8a26f0f7
PS
1028}
1029
ee258d79
PS
/*
 * Completion callback for the final PDU of a compound chain: account the
 * granted credits, then wake the thread sleeping in compound_send_recv().
 */
static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}
1036
/*
 * Completion callback installed once the caller has given up waiting:
 * account the granted credits, then free the mid here since no thread
 * will reap it.
 */
static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}
1043
5f68ea4a
AA
1044/*
1045 * Return a channel (master if none) of @ses that can be used to send
1046 * regular requests.
1047 *
1048 * If we are currently binding a new channel (negprot/sess.setup),
1049 * return the new incomplete channel.
1050 */
1051struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1052{
1053 uint index = 0;
1054
1055 if (!ses)
1056 return NULL;
1057
1058 if (!ses->binding) {
1059 /* round robin */
1060 if (ses->chan_count > 1) {
1061 index = (uint)atomic_inc_return(&ses->chan_seq);
1062 index %= ses->chan_count;
1063 }
1064 return ses->chans[index].server;
1065 } else {
1066 return cifs_ses_server(ses);
1067 }
1068}
1069
/*
 * Send @num_rqst (possibly compounded) requests on @server and wait for
 * all of their responses.  One credit per PDU is reserved up front; the
 * whole chain is set up and sent under srv_mutex so signing order matches
 * wire order.  Each mid's callback returns its credits as responses
 * arrive, and only the last PDU's callback wakes this thread.  On
 * success, resp_iov[i]/resp_buf_type[i] describe response i, which the
 * caller must free.  Returns 0 or a negative errno.
 */
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	/* one credit reserved per PDU, all from the same reconnect instance */
	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from a previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			/* undo mids already set up, then return all credits */
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		/* send failed: unwind mid and signing sequence numbers */
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP))
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* cancel every PDU we have not yet received a response for */
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				/* response still pending: let the callback
				   free the mid and drop its credit */
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		smb311_update_preauth_hash(ses, &iov, 1);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}
1da177e4 1286
e0bba0b8
RS
/*
 * Send a single (non-compounded) request on @server and wait for its
 * response.  Thin wrapper around compound_send_recv() with num_rqst == 1.
 */
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}
1296
738f9de5
PS
1297int
1298SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1299 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1300 const int flags, struct kvec *resp_iov)
1301{
1302 struct smb_rqst rqst;
3cecf486 1303 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
738f9de5
PS
1304 int rc;
1305
3cecf486 1306 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
6da2ec56
KC
1307 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1308 GFP_KERNEL);
117e3b7f
SF
1309 if (!new_iov) {
1310 /* otherwise cifs_send_recv below sets resp_buf_type */
1311 *resp_buf_type = CIFS_NO_BUFFER;
3cecf486 1312 return -ENOMEM;
117e3b7f 1313 }
3cecf486
RS
1314 } else
1315 new_iov = s_iov;
738f9de5
PS
1316
1317 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1318 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1319
1320 new_iov[0].iov_base = new_iov[1].iov_base;
1321 new_iov[0].iov_len = 4;
1322 new_iov[1].iov_base += 4;
1323 new_iov[1].iov_len -= 4;
1324
1325 memset(&rqst, 0, sizeof(struct smb_rqst));
1326 rqst.rq_iov = new_iov;
1327 rqst.rq_nvec = n_vec + 1;
1328
352d96f3
AA
1329 rc = cifs_send_recv(xid, ses, ses->server,
1330 &rqst, resp_buf_type, flags, resp_iov);
3cecf486
RS
1331 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1332 kfree(new_iov);
738f9de5
PS
1333 return rc;
1334}
1335
/*
 * Send a single SMB1 request (in_buf) and wait uninterruptibly for its
 * response, which is copied into the caller-supplied out_buf (assumed
 * large enough for the response — see the memcpy below; TODO confirm
 * callers always pass a max-frame buffer).  *pbytes_returned is set to
 * the RFC1002 length of the response.  Returns 0 or a negative errno.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			 len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	/* send failed: roll back the signing sequence number bump */
	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		/* mid already freed by cifs_sync_mid_result on error */
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}
1da177e4 1447
7ee1af76
JA
1448/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1449 blocking lock to return. */
1450
1451static int
96daf2b0 1452send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1453 struct smb_hdr *in_buf,
1454 struct smb_hdr *out_buf)
1455{
1456 int bytes_returned;
96daf2b0 1457 struct cifs_ses *ses = tcon->ses;
7ee1af76
JA
1458 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1459
1460 /* We just modify the current in_buf to change
1461 the type of lock from LOCKING_ANDX_SHARED_LOCK
1462 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1463 LOCKING_ANDX_CANCEL_LOCK. */
1464
1465 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1466 pSMB->Timeout = 0;
88257360 1467 pSMB->hdr.Mid = get_next_mid(ses->server);
7ee1af76
JA
1468
1469 return SendReceive(xid, ses, in_buf, out_buf,
7749981e 1470 &bytes_returned, 0);
7ee1af76
JA
1471}
1472
1473int
96daf2b0 1474SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
7ee1af76
JA
1475 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1476 int *pbytes_returned)
1477{
1478 int rc = 0;
1479 int rstart = 0;
7ee1af76 1480 struct mid_q_entry *midQ;
96daf2b0 1481 struct cifs_ses *ses;
fb2036d8
PS
1482 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1483 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1484 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
34f4deb7 1485 unsigned int instance;
afe6f653 1486 struct TCP_Server_Info *server;
7ee1af76
JA
1487
1488 if (tcon == NULL || tcon->ses == NULL) {
f96637be 1489 cifs_dbg(VFS, "Null smb session\n");
7ee1af76
JA
1490 return -EIO;
1491 }
1492 ses = tcon->ses;
afe6f653 1493 server = ses->server;
7ee1af76 1494
afe6f653 1495 if (server == NULL) {
f96637be 1496 cifs_dbg(VFS, "Null tcp session\n");
7ee1af76
JA
1497 return -EIO;
1498 }
1499
afe6f653 1500 if (server->tcpStatus == CifsExiting)
7ee1af76
JA
1501 return -ENOENT;
1502
79a58d1f 1503 /* Ensure that we do not send more than 50 overlapping requests
7ee1af76
JA
1504 to the same server. We may make this configurable later or
1505 use ses->maxReq */
1506
fb2036d8 1507 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
a0a3036b
JP
1508 cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1509 len);
6d9c6d54
VL
1510 return -EIO;
1511 }
1512
afe6f653 1513 rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
7ee1af76
JA
1514 if (rc)
1515 return rc;
1516
79a58d1f 1517 /* make sure that we sign in the same order that we send on this socket
7ee1af76
JA
1518 and avoid races inside tcp sendmsg code that could cause corruption
1519 of smb data */
1520
afe6f653 1521 mutex_lock(&server->srv_mutex);
7ee1af76
JA
1522
1523 rc = allocate_mid(ses, in_buf, &midQ);
1524 if (rc) {
afe6f653 1525 mutex_unlock(&server->srv_mutex);
7ee1af76
JA
1526 return rc;
1527 }
1528
afe6f653 1529 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
829049cb 1530 if (rc) {
3c1bf7e4 1531 cifs_delete_mid(midQ);
afe6f653 1532 mutex_unlock(&server->srv_mutex);
829049cb
VL
1533 return rc;
1534 }
1da177e4 1535
7c9421e1 1536 midQ->mid_state = MID_REQUEST_SUBMITTED;
afe6f653
RS
1537 cifs_in_send_inc(server);
1538 rc = smb_send(server, in_buf, len);
1539 cifs_in_send_dec(server);
789e6661 1540 cifs_save_when_sent(midQ);
ad313cb8
JL
1541
1542 if (rc < 0)
afe6f653 1543 server->sequence_number -= 2;
ad313cb8 1544
afe6f653 1545 mutex_unlock(&server->srv_mutex);
7ee1af76 1546
79a58d1f 1547 if (rc < 0) {
3c1bf7e4 1548 cifs_delete_mid(midQ);
7ee1af76
JA
1549 return rc;
1550 }
1551
1552 /* Wait for a reply - allow signals to interrupt. */
afe6f653 1553 rc = wait_event_interruptible(server->response_q,
7c9421e1 1554 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
afe6f653
RS
1555 ((server->tcpStatus != CifsGood) &&
1556 (server->tcpStatus != CifsNew)));
7ee1af76
JA
1557
1558 /* Were we interrupted by a signal ? */
1559 if ((rc == -ERESTARTSYS) &&
7c9421e1 1560 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
afe6f653
RS
1561 ((server->tcpStatus == CifsGood) ||
1562 (server->tcpStatus == CifsNew))) {
7ee1af76
JA
1563
1564 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1565 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1566 blocking lock to return. */
afe6f653 1567 rc = send_cancel(server, &rqst, midQ);
7ee1af76 1568 if (rc) {
3c1bf7e4 1569 cifs_delete_mid(midQ);
7ee1af76
JA
1570 return rc;
1571 }
1572 } else {
1573 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1574 to cause the blocking lock to return. */
1575
1576 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1577
1578 /* If we get -ENOLCK back the lock may have
1579 already been removed. Don't exit in this case. */
1580 if (rc && rc != -ENOLCK) {
3c1bf7e4 1581 cifs_delete_mid(midQ);
7ee1af76
JA
1582 return rc;
1583 }
1584 }
1585
afe6f653 1586 rc = wait_for_response(server, midQ);
1be912dd 1587 if (rc) {
afe6f653 1588 send_cancel(server, &rqst, midQ);
1be912dd 1589 spin_lock(&GlobalMid_Lock);
7c9421e1 1590 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1be912dd
JL
1591 /* no longer considered to be "in-flight" */
1592 midQ->callback = DeleteMidQEntry;
1593 spin_unlock(&GlobalMid_Lock);
1594 return rc;
1595 }
1596 spin_unlock(&GlobalMid_Lock);
7ee1af76 1597 }
1be912dd
JL
1598
1599 /* We got the response - restart system call. */
1600 rstart = 1;
7ee1af76
JA
1601 }
1602
afe6f653 1603 rc = cifs_sync_mid_result(midQ, server);
053d5034 1604 if (rc != 0)
7ee1af76 1605 return rc;
50c2f753 1606
17c8bfed 1607 /* rcvd frame is ok */
7c9421e1 1608 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
698e96a8 1609 rc = -EIO;
3175eb9b 1610 cifs_tcon_dbg(VFS, "Bad MID state?\n");
698e96a8
VL
1611 goto out;
1612 }
1da177e4 1613
d4e4854f 1614 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
2c8f981d 1615 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
afe6f653 1616 rc = cifs_check_receive(midQ, server, 0);
17c8bfed 1617out:
3c1bf7e4 1618 cifs_delete_mid(midQ);
7ee1af76
JA
1619 if (rstart && rc == -EACCES)
1620 return -ERESTARTSYS;
1da177e4
LT
1621 return rc;
1622}